xref: /titanic_41/usr/src/uts/intel/io/agpgart/agpgart.c (revision 0cfdb6036e046270988a17ac442e4d717d426a44)
1 /*
2  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 /*
6  * Portions Philip Brown phil@bolthole.com Dec 2001
7  */
8 
9 #pragma ident	"%Z%%M%	%I%	%E% SMI"
10 
11 /*
12  * agpgart driver
13  *
 * This driver is primarily targeted at providing memory support for INTEL
 * AGP devices, INTEL memoryless video cards, and AMD64 cpu GART devices.
16  * So there are four main architectures, ARC_IGD810, ARC_IGD830, ARC_INTELAGP,
17  * ARC_AMD64AGP to agpgart driver. However, the memory
18  * interfaces are the same for these architectures. The difference is how to
19  * manage the hardware GART table for them.
20  *
21  * For large memory allocation, this driver use direct mapping to userland
22  * application interface to save kernel virtual memory .
23  */
24 
25 #include <sys/types.h>
26 #include <sys/pci.h>
27 #include <sys/systm.h>
28 #include <sys/conf.h>
29 #include <sys/file.h>
30 #include <sys/kstat.h>
31 #include <sys/stat.h>
32 #include <sys/modctl.h>
33 #include <sys/ddi.h>
34 #include <sys/sunddi.h>
35 #include <sys/sunldi.h>
36 #include <sys/policy.h>
37 #include <sys/ddidevmap.h>
38 #include <vm/seg_dev.h>
39 #include <sys/pmem.h>
40 #include <sys/agpgart.h>
41 #include <sys/agp/agpdefs.h>
42 #include <sys/agp/agpgart_impl.h>
43 #include <sys/agp/agpamd64gart_io.h>
44 #include <sys/agp/agpmaster_io.h>
45 #include <sys/agp/agptarget_io.h>
46 
/* Dynamic debug support */
int agp_debug_var = 0;
/*
 * Debug output macros: AGPDB_PRINT1 (verbose notes) fires only at debug
 * level 1; AGPDB_PRINT2 (warnings) fires at any level >= 1.  Wrapped in
 * do/while (0) so each expands to exactly one statement and cannot
 * mis-bind an `else` when used unbraced inside an if/else.
 */
#define	AGPDB_PRINT1(fmt)	do {			\
	if (agp_debug_var == 1)				\
		cmn_err fmt;				\
} while (0)
#define	AGPDB_PRINT2(fmt)	do {			\
	if (agp_debug_var >= 1)				\
		cmn_err fmt;				\
} while (0)
51 
/* Driver global softstate handle */
static void *agpgart_glob_soft_handle;

/* Number of driver instances probed when scanning for devices (0..15) */
#define	MAX_INSTNUM			16

/*
 * Minor number encoding: the low 4 bits are a sub-minor field and the
 * remaining high bits hold the driver instance number.
 */
#define	AGP_DEV2INST(devt)	(getminor((devt)) >> 4)
#define	AGP_INST2MINOR(instance)	((instance) << 4)
#define	IS_INTEL_830(type)	((type) == ARC_IGD830)
/* A "true" AGP master is a real AGP bus device, not integrated graphics */
#define	IS_TRUE_AGP(type)	(((type) == ARC_INTELAGP) || \
	((type) == ARC_AMD64AGP))
62 
/*
 * Copy a native agp_info_t (v) into its ILP32 counterpart (v32) for
 * 32-bit ioctl consumers; values are truncated to the 32-bit field
 * widths.  Wrapped in do/while (0) so the multi-statement expansion
 * behaves as a single statement in every context (a bare { } block
 * followed by the caller's `;` breaks unbraced if/else usage).
 */
#define	agpinfo_default_to_32(v, v32)	\
	do {				\
		(v32).agpi32_version = (v).agpi_version;	\
		(v32).agpi32_devid = (v).agpi_devid;	\
		(v32).agpi32_mode = (v).agpi_mode;	\
		(v32).agpi32_aperbase = (v).agpi_aperbase;	\
		(v32).agpi32_apersize = (v).agpi_apersize;	\
		(v32).agpi32_pgtotal = (v).agpi_pgtotal;	\
		(v32).agpi32_pgsystem = (v).agpi_pgsystem;	\
		(v32).agpi32_pgused = (v).agpi_pgused;	\
	} while (0)
74 
/*
 * DMA attributes for general agp memory allocations: anywhere in the
 * low 4GB, aligned on an AGP page boundary.
 * NOTE(review): dma_attr_sgllen is tagged "variable" below — presumably
 * overwritten before use; confirm at the allocation call sites.
 */
static ddi_dma_attr_t agpgart_dma_attr = {
	DMA_ATTR_V0,
	0U,				/* dma_attr_addr_lo */
	0xffffffffU,			/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	(uint64_t)AGP_PAGE_SIZE,	/* dma_attr_align */
	1,				/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	4,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};
89 
/*
 * DMA attributes for the gart table itself.  AMD64 supports a gart
 * table above 4G (see alloc_gart_table), yet dma_attr_addr_hi here
 * still caps addresses at 4GB-1 — NOTE(review): presumably the 64-bit
 * case is handled at the call site; confirm in alloc_gart_table.
 */
static ddi_dma_attr_t garttable_dma_attr = {
	DMA_ATTR_V0,
	0U,				/* dma_attr_addr_lo */
	0xffffffffU,			/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	(uint64_t)AGP_PAGE_SIZE,	/* dma_attr_align */
	1,				/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	4,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};
107 
/*
 * The AGPGART table needs physically contiguous memory.  To ensure
 * that each access to the gart table is strongly ordered and
 * uncachable, we use DDI_STRICTORDER_ACC.
 */
static ddi_device_acc_attr_t gart_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC	/* must be DDI_STRICTORDER_ACC */
};
118 
/*
 * AGP memory is usually used as texture memory or for a framebuffer, so
 * we could set the memory attribute to write combining.  Video drivers
 * will determine the frame buffer attributes, for example whether the
 * memory is write combining or non-cachable.  However, the interface
 * between Xorg and the agpgart driver to support attribute selection
 * doesn't exist yet, so we set agp memory to non-cachable by default
 * for now.  This attribute might be overridden by the MTRR on x86.
 */
static ddi_device_acc_attr_t mem_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC	/* Can be DDI_MERGING_OK_ACC */
};
133 
134 static keytable_ent_t *
135 agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset);
136 static void
137 amd64_gart_unregister(amd64_garts_dev_t *cpu_garts);
138 
139 
140 static void
141 agp_devmap_unmap(devmap_cookie_t handle, void *devprivate,
142     offset_t off, size_t len, devmap_cookie_t new_handle1,
143     void **new_devprivate1, devmap_cookie_t new_handle2,
144     void **new_devprivate2)
145 {
146 
147 	struct keytable_ent *mementry;
148 	agpgart_softstate_t *softstate;
149 	agpgart_ctx_t *ctxp, *newctxp1, *newctxp2;
150 
151 	ASSERT(AGP_ALIGNED(len) && AGP_ALIGNED(off));
152 	ASSERT(devprivate);
153 	ASSERT(handle);
154 
155 	ctxp = (agpgart_ctx_t *)devprivate;
156 	softstate = ctxp->actx_sc;
157 	ASSERT(softstate);
158 
159 	if (new_handle1 != NULL) {
160 		newctxp1 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
161 		newctxp1->actx_sc = softstate;
162 		newctxp1->actx_off = ctxp->actx_off;
163 		*new_devprivate1 = newctxp1;
164 	}
165 
166 	if (new_handle2 != NULL) {
167 		newctxp2 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
168 		newctxp2->actx_sc = softstate;
169 		newctxp2->actx_off = off + len;
170 		*new_devprivate2 = newctxp2;
171 	}
172 
173 	mutex_enter(&softstate->asoft_instmutex);
174 	if ((new_handle1 == NULL) && (new_handle2 == NULL)) {
175 		mementry =
176 		    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off));
177 		ASSERT(mementry);
178 		mementry->kte_refcnt--;
179 	} else if ((new_handle1 != NULL) && (new_handle2 != NULL)) {
180 		mementry =
181 		    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off));
182 		ASSERT(mementry);
183 		mementry->kte_refcnt++;
184 	}
185 	ASSERT(mementry->kte_refcnt >= 0);
186 	mutex_exit(&softstate->asoft_instmutex);
187 	kmem_free(ctxp, sizeof (struct agpgart_ctx));
188 }
189 
190 /*ARGSUSED*/
191 static int
192 agp_devmap_map(devmap_cookie_t handle, dev_t dev,
193     uint_t flags, offset_t offset, size_t len, void **new_devprivate)
194 {
195 	agpgart_softstate_t *softstate;
196 	int instance;
197 	struct keytable_ent *mementry;
198 	agpgart_ctx_t *newctxp;
199 
200 	ASSERT(handle);
201 	instance = AGP_DEV2INST(dev);
202 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
203 	if (softstate == NULL) {
204 		AGPDB_PRINT2((CE_WARN, "agp_devmap_map: get soft state err"));
205 		return (ENXIO);
206 	}
207 
208 	ASSERT(softstate);
209 	ASSERT(mutex_owned(&softstate->asoft_instmutex));
210 	ASSERT(len);
211 	ASSERT(AGP_ALIGNED(offset) && AGP_ALIGNED(len));
212 
213 	mementry =
214 	    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));
215 	ASSERT(mementry);
216 	mementry->kte_refcnt++;
217 	ASSERT(mementry->kte_refcnt >= 0);
218 	newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
219 	newctxp->actx_off = offset;
220 	newctxp->actx_sc = softstate;
221 	*new_devprivate = newctxp;
222 
223 	return (0);
224 }
225 
226 /*ARGSUSED*/
227 static int agp_devmap_dup(devmap_cookie_t handle, void *devprivate,
228     devmap_cookie_t new_handle, void **new_devprivate)
229 {
230 	struct keytable_ent *mementry;
231 	agpgart_ctx_t *newctxp, *ctxp;
232 	agpgart_softstate_t *softstate;
233 
234 	ASSERT(devprivate);
235 	ASSERT(handle && new_handle);
236 
237 	ctxp = (agpgart_ctx_t *)devprivate;
238 	ASSERT(AGP_ALIGNED(ctxp->actx_off));
239 
240 	newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
241 	newctxp->actx_off = ctxp->actx_off;
242 	newctxp->actx_sc = ctxp->actx_sc;
243 	softstate = (agpgart_softstate_t *)newctxp->actx_sc;
244 
245 	mutex_enter(&softstate->asoft_instmutex);
246 	mementry = agp_find_bound_keyent(softstate,
247 	    AGP_BYTES2PAGES(newctxp->actx_off));
248 	mementry->kte_refcnt++;
249 	ASSERT(mementry->kte_refcnt >= 0);
250 	mutex_exit(&softstate->asoft_instmutex);
251 	*new_devprivate = newctxp;
252 
253 	return (0);
254 }
255 
/* Devmap callback vector for agp memory mappings (callbacks above) */
struct devmap_callback_ctl agp_devmap_cb = {
	DEVMAP_OPS_REV,		/* rev */
	agp_devmap_map,		/* map */
	NULL,			/* access */
	agp_devmap_dup,		/* dup */
	agp_devmap_unmap,	/* unmap */
};
263 
264 /*
265  * agp_master_regis_byname()
266  *
267  * Description:
268  * 	Open the AGP master device node by device path name and
269  * 	register the device handle for later operations.
270  * 	We check all possible driver instance from 0
271  * 	to MAX_INSTNUM because the master device could be
272  * 	at any instance number. Only one AGP master is supported.
273  *
274  * Arguments:
275  * 	master_hdlp		AGP master device LDI handle pointer
276  *	agpgart_l		AGPGART driver LDI identifier
277  *
278  * Returns:
279  * 	-1			failed
280  * 	0			success
281  */
282 static int
283 agp_master_regis_byname(ldi_handle_t *master_hdlp, ldi_ident_t agpgart_li)
284 {
285 	int	i;
286 	char	buf[MAXPATHLEN];
287 
288 	ASSERT(master_hdlp);
289 	ASSERT(agpgart_li);
290 
291 	/*
292 	 * Search all possible instance numbers for the agp master device.
293 	 * Only one master device is supported now, so the search ends
294 	 * when one master device is found.
295 	 */
296 	for (i = 0; i < MAX_INSTNUM; i++) {
297 		(void) snprintf(buf, MAXPATHLEN, "%s%d", AGPMASTER_DEVLINK, i);
298 		if ((ldi_open_by_name(buf, 0, kcred,
299 		    master_hdlp, agpgart_li)))
300 			continue;
301 		AGPDB_PRINT1((CE_NOTE,
302 		    "master device found: instance number=%d", i));
303 		break;
304 
305 	}
306 
307 	/* AGP master device not found */
308 	if (i == MAX_INSTNUM)
309 		return (-1);
310 
311 	return (0);
312 }
313 
314 /*
315  * agp_target_regis_byname()
316  *
317  * Description:
318  * 	This function opens agp bridge device node by
319  * 	device path name and registers the device handle
320  * 	for later operations.
321  * 	We check driver instance from 0 to MAX_INSTNUM
322  * 	because the master device could be at any instance
323  * 	number. Only one agp target is supported.
324  *
325  *
326  * Arguments:
327  *	target_hdlp		AGP target device LDI handle pointer
328  *	agpgart_l		AGPGART driver LDI identifier
329  *
330  * Returns:
331  * 	-1			failed
332  * 	0			success
333  */
334 static int
335 agp_target_regis_byname(ldi_handle_t *target_hdlp, ldi_ident_t agpgart_li)
336 {
337 	int	i;
338 	char	buf[MAXPATHLEN];
339 
340 	ASSERT(target_hdlp);
341 	ASSERT(agpgart_li);
342 
343 	for (i = 0; i < MAX_INSTNUM; i++) {
344 		(void) snprintf(buf, MAXPATHLEN, "%s%d", AGPTARGET_DEVLINK, i);
345 		if ((ldi_open_by_name(buf, 0, kcred,
346 		    target_hdlp, agpgart_li)))
347 			continue;
348 
349 		AGPDB_PRINT1((CE_NOTE,
350 		    "bridge device found: instance number=%d", i));
351 		break;
352 
353 	}
354 
355 	/* AGP bridge device not found */
356 	if (i == MAX_INSTNUM) {
357 		AGPDB_PRINT2((CE_WARN, "bridge device not found"));
358 		return (-1);
359 	}
360 
361 	return (0);
362 }
363 
364 /*
365  * amd64_gart_regis_byname()
366  *
367  * Description:
368  * 	Open all amd64 gart device nodes by deice path name and
369  * 	register the device handles for later operations. Each cpu
370  * 	has its own amd64 gart device.
371  *
372  * Arguments:
373  * 	cpu_garts		cpu garts device list header
374  *	agpgart_l		AGPGART driver LDI identifier
375  *
376  * Returns:
377  * 	-1			failed
378  * 	0			success
379  */
380 static int
381 amd64_gart_regis_byname(amd64_garts_dev_t *cpu_garts, ldi_ident_t agpgart_li)
382 {
383 	amd64_gart_dev_list_t	*gart_list;
384 	int			i;
385 	char			buf[MAXPATHLEN];
386 	ldi_handle_t		gart_hdl;
387 	int			ret;
388 
389 	ASSERT(cpu_garts);
390 	ASSERT(agpgart_li);
391 
392 	/*
393 	 * Search all possible instance numbers for the gart devices.
394 	 * There can be multiple on-cpu gart devices for Opteron server.
395 	 */
396 	for (i = 0; i < MAX_INSTNUM; i++) {
397 		(void) snprintf(buf, MAXPATHLEN, "%s%d", CPUGART_DEVLINK, i);
398 		ret = ldi_open_by_name(buf, 0, kcred,
399 		    &gart_hdl, agpgart_li);
400 
401 		if (ret == ENODEV)
402 			continue;
403 		else if (ret != 0) { /* There was an error opening the device */
404 			amd64_gart_unregister(cpu_garts);
405 			return (ret);
406 		}
407 
408 		AGPDB_PRINT1((CE_NOTE,
409 		    "amd64 gart device found: instance number=%d", i));
410 
411 		gart_list = (amd64_gart_dev_list_t *)
412 		    kmem_zalloc(sizeof (amd64_gart_dev_list_t), KM_SLEEP);
413 
414 		/* Add new item to the head of the gart device list */
415 		gart_list->gart_devhdl = gart_hdl;
416 		gart_list->next = cpu_garts->gart_dev_list_head;
417 		cpu_garts->gart_dev_list_head = gart_list;
418 		cpu_garts->gart_device_num++;
419 	}
420 
421 	if (cpu_garts->gart_device_num == 0)
422 		return (ENODEV);
423 	return (0);
424 }
425 
426 /*
427  * Unregister agp master device handle
428  */
429 static void
430 agp_master_unregister(ldi_handle_t *master_hdlp)
431 {
432 	ASSERT(master_hdlp);
433 
434 	if (master_hdlp) {
435 		(void) ldi_close(*master_hdlp, 0, kcred);
436 		*master_hdlp = NULL;
437 	}
438 }
439 
440 /*
441  * Unregister agp bridge device handle
442  */
443 static void
444 agp_target_unregister(ldi_handle_t *target_hdlp)
445 {
446 	if (target_hdlp) {
447 		(void) ldi_close(*target_hdlp, 0, kcred);
448 		*target_hdlp = NULL;
449 	}
450 }
451 
452 /*
453  * Unregister all amd64 gart device handles
454  */
455 static void
456 amd64_gart_unregister(amd64_garts_dev_t *cpu_garts)
457 {
458 	amd64_gart_dev_list_t	*gart_list;
459 	amd64_gart_dev_list_t	*next;
460 
461 	ASSERT(cpu_garts);
462 
463 	for (gart_list = cpu_garts->gart_dev_list_head;
464 	    gart_list; gart_list = next) {
465 
466 		ASSERT(gart_list->gart_devhdl);
467 		(void) ldi_close(gart_list->gart_devhdl, 0, kcred);
468 		next = gart_list->next;
469 		/* Free allocated memory */
470 		kmem_free(gart_list, sizeof (amd64_gart_dev_list_t));
471 	}
472 	cpu_garts->gart_dev_list_head = NULL;
473 	cpu_garts->gart_device_num = 0;
474 }
475 
476 /*
477  * lyr_detect_master_type()
478  *
479  * Description:
480  * 	This function gets agp master type by querying agp master device.
481  *
482  * Arguments:
483  * 	master_hdlp		agp master device ldi handle pointer
484  *
485  * Returns:
486  * 	-1			unsupported device
487  * 	DEVICE_IS_I810		i810 series
488  * 	DEVICE_IS_I810		i830 series
489  * 	DEVICE_IS_AGP		true agp master
490  */
491 static int
492 lyr_detect_master_type(ldi_handle_t *master_hdlp)
493 {
494 	int vtype;
495 	int err;
496 
497 	ASSERT(master_hdlp);
498 
499 	/* ldi_ioctl(agpmaster) */
500 	err = ldi_ioctl(*master_hdlp, DEVICE_DETECT,
501 	    (intptr_t)&vtype, FKIOCTL, kcred, 0);
502 	if (err) /* Unsupported graphics device */
503 		return (-1);
504 	return (vtype);
505 }
506 
507 /*
508  * devtect_target_type()
509  *
510  * Description:
511  * 	This function gets the host bridge chipset type by querying the agp
512  *	target device.
513  *
514  * Arguments:
515  * 	target_hdlp		agp target device LDI handle pointer
516  *
517  * Returns:
518  * 	CHIP_IS_INTEL		Intel agp chipsets
519  * 	CHIP_IS_AMD		AMD agp chipset
520  * 	-1			unsupported chipset
521  */
522 static int
523 lyr_detect_target_type(ldi_handle_t *target_hdlp)
524 {
525 	int btype;
526 	int err;
527 
528 	ASSERT(target_hdlp);
529 
530 	err = ldi_ioctl(*target_hdlp, CHIP_DETECT, (intptr_t)&btype,
531 	    FKIOCTL, kcred, 0);
532 	if (err)	/* Unsupported bridge device */
533 		return (-1);
534 	return (btype);
535 }
536 
/*
 * lyr_init()
 *
 * Description:
 * 	This function detects the graphics system architecture and
 * 	registers all relative device handles in a global structure
 * 	"agp_regdev". Then it stores the system arc type in driver
 * 	soft state.
 *
 * Arguments:
 *	agp_regdev		AGP devices registration struct pointer
 *	agpgart_li		AGPGART driver LDI identifier
 *
 * Returns:
 * 	0	System arc supported and agp devices registration succeeded.
 * 	-1	System arc not supported or device registration failed.
 */
int
lyr_init(agp_registered_dev_t *agp_regdev, ldi_ident_t agpgart_li)
{
	ldi_handle_t *master_hdlp;
	ldi_handle_t *target_hdlp;
	amd64_garts_dev_t *garts_dev;
	int card_type, chip_type;
	int ret;

	ASSERT(agp_regdev);

	bzero(agp_regdev, sizeof (agp_registered_dev_t));
	agp_regdev->agprd_arctype = ARC_UNKNOWN;
	/*
	 * Register agp devices, assuming all instances attached, and
	 * detect which agp architecture this server belongs to. This
	 * must be done before the agpgart driver starts to use layered
	 * driver interfaces.
	 */
	master_hdlp = &agp_regdev->agprd_masterhdl;
	target_hdlp = &agp_regdev->agprd_targethdl;
	garts_dev = &agp_regdev->agprd_cpugarts;

	/* Check whether the system is amd64 arc */
	if ((ret = amd64_gart_regis_byname(garts_dev, agpgart_li)) == ENODEV) {
		/* No amd64 gart devices */
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: this is not an amd64 system"));
		if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: register master device unsuccessful"));
			goto err1;
		}
		if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: register target device unsuccessful"));
			goto err2;
		}
		card_type = lyr_detect_master_type(master_hdlp);
		/*
		 * Detect system arc by master device. If it is an Intel
		 * integrated device, finish the detection successfully.
		 */
		switch (card_type) {
		case DEVICE_IS_I810:	/* I810 likewise graphics */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: the system is Intel 810 arch"));
			agp_regdev->agprd_arctype = ARC_IGD810;
			return (0);
		case DEVICE_IS_I830:	/* I830 likewise graphics */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: the system is Intel 830 arch"));
			agp_regdev->agprd_arctype = ARC_IGD830;
			return (0);
		case DEVICE_IS_AGP:	/* AGP graphics */
			break;
		default:		/* Non IGD/AGP graphics */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: non-supported master device"));
			goto err3;
		}

		chip_type = lyr_detect_target_type(target_hdlp);

		/* Continue to detect AGP arc by target device */
		switch (chip_type) {
		case CHIP_IS_INTEL:	/* Intel chipset */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: Intel AGP arch detected"));
			agp_regdev->agprd_arctype = ARC_INTELAGP;
			return (0);
		case CHIP_IS_AMD:	/* AMD chipset */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: no cpu gart, but have AMD64 chipsets"));
			goto err3;
		default:		/* Non supported chipset */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: detection can not continue"));
			goto err3;
		}

	}

	if (ret)
		return (-1); /* Errors in open amd64 cpu gart devices */

	/*
	 * AMD64 cpu gart device exists, continue detection
	 */
	if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
		AGPDB_PRINT1((CE_NOTE, "lyr_init: no AGP master in amd64"));
		goto err1;
	}

	if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: no AGP bridge"));
		goto err2;
	}

	AGPDB_PRINT1((CE_NOTE,
	    "lyr_init: the system is AMD64 AGP architecture"));

	agp_regdev->agprd_arctype = ARC_AMD64AGP;

	return (0); /* Finished successfully */

	/* Unwind in reverse registration order, then mark arch unknown */
err3:
	agp_target_unregister(&agp_regdev->agprd_targethdl);
err2:
	agp_master_unregister(&agp_regdev->agprd_masterhdl);
err1:
	agp_regdev->agprd_arctype = ARC_UNKNOWN;
	return (-1);
}
669 
670 void
671 lyr_end(agp_registered_dev_t *agp_regdev)
672 {
673 	ASSERT(agp_regdev);
674 
675 	switch (agp_regdev->agprd_arctype) {
676 	case ARC_IGD810:
677 	case ARC_IGD830:
678 	case ARC_INTELAGP:
679 		agp_master_unregister(&agp_regdev->agprd_masterhdl);
680 		agp_target_unregister(&agp_regdev->agprd_targethdl);
681 
682 		return;
683 	case ARC_AMD64AGP:
684 		agp_master_unregister(&agp_regdev->agprd_masterhdl);
685 		agp_target_unregister(&agp_regdev->agprd_targethdl);
686 		amd64_gart_unregister(&agp_regdev->agprd_cpugarts);
687 
688 		return;
689 	default:
690 		ASSERT(0);
691 		return;
692 	}
693 }
694 
/*
 * lyr_get_info()
 *
 * Description:
 * 	Query the registered agp devices and fill in "info" with device
 * 	ids, version/status words and aperture parameters, according to
 * 	the detected architecture.
 *
 * Arguments:
 *	info		kernel agp info struct to be filled in
 *	agp_regdev	AGP devices registration struct pointer
 *
 * Returns:
 * 	0	success
 * 	-1	ioctl failure, unknown architecture, or an aperture that
 *		is not programmed correctly
 */
int
lyr_get_info(agp_kern_info_t *info, agp_registered_dev_t *agp_regdev)
{
	ldi_handle_t hdl;
	igd_info_t value1;
	i_agp_info_t value2;
	size_t prealloc_size;
	int err;

	ASSERT(info);
	ASSERT(agp_regdev);

	switch (agp_regdev->agprd_arctype) {
	case ARC_IGD810:
		/* Integrated i810: device info from master, prealloc
		 * size from the target bridge. */
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
		    FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value1.igd_devid;
		info->agpki_aperbase = value1.igd_aperbase;
		info->agpki_apersize = value1.igd_apersize;

		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
		    (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_presize = prealloc_size;

		break;

	case ARC_IGD830:
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
		    FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value1.igd_devid;
		info->agpki_aperbase = value1.igd_aperbase;
		info->agpki_apersize = value1.igd_apersize;

		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
		    (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
		if (err)
			return (-1);

		/*
		 * Assume all units are kilobytes unless explicitly
		 * stated below:
		 * preallocated GTT memory = preallocated memory - GTT size
		 * 	- scratch page size
		 *
		 * scratch page size = 4
		 * GTT size (KB) = aperture size (MB)
		 * this algorithm came from Xorg source code
		 */
		if (prealloc_size > (info->agpki_apersize + 4))
			prealloc_size =
			    prealloc_size - info->agpki_apersize - 4;
		else {
			AGPDB_PRINT2((CE_WARN, "lyr_get_info: "
			    "pre-allocated memory too small, setting to zero"));
			prealloc_size = 0;
		}
		info->agpki_presize = prealloc_size;
		AGPDB_PRINT2((CE_NOTE,
		    "lyr_get_info: prealloc_size = %ldKB, apersize = %dMB",
		    prealloc_size, info->agpki_apersize));
		break;
	case ARC_INTELAGP:
	case ARC_AMD64AGP:
		/* AGP devices */
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, AGP_MASTER_GETINFO,
		    (intptr_t)&value2, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value2.iagp_devid;
		info->agpki_mver = value2.iagp_ver;
		info->agpki_mstatus = value2.iagp_mode;
		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, AGP_TARGET_GETINFO,
		    (intptr_t)&value2, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_tdevid = value2.iagp_devid;
		info->agpki_tver = value2.iagp_ver;
		info->agpki_tstatus = value2.iagp_mode;
		info->agpki_aperbase = value2.iagp_aperbase;
		info->agpki_apersize = value2.iagp_apersize;
		break;
	default:
		AGPDB_PRINT2((CE_WARN,
		    "lyr_get_info: function doesn't work for unknown arc"));
		return (-1);
	}
	/* Sanity-check the aperture the firmware/bridge programmed */
	if ((info->agpki_apersize >= MAXAPERMEGAS) ||
	    (info->agpki_apersize == 0) ||
	    (info->agpki_aperbase == 0)) {
		AGPDB_PRINT2((CE_WARN,
		    "lyr_get_info: aperture is not programmed correctly!"));
		return (-1);
	}

	return (0);
}
803 
804 /*
805  * lyr_i8xx_add_to_gtt()
806  *
807  * Description:
808  * 	This function sets up the integrated video device gtt table
809  * 	via an ioclt to the AGP master driver.
810  *
811  * Arguments:
812  * 	pg_offset	The start entry to be setup
813  * 	keyent		Keytable entity pointer
814  *	agp_regdev	AGP devices registration struct pointer
815  *
816  * Returns:
817  * 	0		success
818  * 	-1		invalid operations
819  */
820 int
821 lyr_i8xx_add_to_gtt(uint32_t pg_offset, keytable_ent_t *keyent,
822     agp_registered_dev_t *agp_regdev)
823 {
824 	int err = 0;
825 	int rval;
826 	ldi_handle_t hdl;
827 	igd_gtt_seg_t gttseg;
828 	uint32_t *addrp, i;
829 	uint32_t npages;
830 
831 	ASSERT(keyent);
832 	ASSERT(agp_regdev);
833 	gttseg.igs_pgstart =  pg_offset;
834 	npages = keyent->kte_pages;
835 	gttseg.igs_npage = npages;
836 	gttseg.igs_type = keyent->kte_type;
837 	gttseg.igs_phyaddr = (uint32_t *)kmem_zalloc
838 	    (sizeof (uint32_t) * gttseg.igs_npage, KM_SLEEP);
839 
840 	addrp = gttseg.igs_phyaddr;
841 	for (i = 0; i < npages; i++, addrp++) {
842 		*addrp =
843 		    (uint32_t)((keyent->kte_pfnarray[i]) << GTT_PAGE_SHIFT);
844 	}
845 
846 	hdl = agp_regdev->agprd_masterhdl;
847 	if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)&gttseg, FKIOCTL,
848 	    kcred, &rval)) {
849 		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: ldi_ioctl error"));
850 		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pg_start=0x%x",
851 		    gttseg.igs_pgstart));
852 		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pages=0x%x",
853 		    gttseg.igs_npage));
854 		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: type=0x%x",
855 		    gttseg.igs_type));
856 		err = -1;
857 	}
858 	kmem_free(gttseg.igs_phyaddr, sizeof (uint32_t) * gttseg.igs_npage);
859 	return (err);
860 }
861 
862 /*
863  * lyr_i8xx_remove_from_gtt()
864  *
865  * Description:
866  * 	This function clears the integrated video device gtt table via
867  * 	an ioctl to the agp master device.
868  *
869  * Arguments:
870  * 	pg_offset	The starting entry to be cleared
871  * 	npage		The number of entries to be cleared
872  *	agp_regdev	AGP devices struct pointer
873  *
874  * Returns:
875  * 	0		success
876  * 	-1		invalid operations
877  */
878 int
879 lyr_i8xx_remove_from_gtt(uint32_t pg_offset, uint32_t npage,
880     agp_registered_dev_t *agp_regdev)
881 {
882 	int			rval;
883 	ldi_handle_t		hdl;
884 	igd_gtt_seg_t		gttseg;
885 
886 	gttseg.igs_pgstart =  pg_offset;
887 	gttseg.igs_npage = npage;
888 
889 	hdl = agp_regdev->agprd_masterhdl;
890 	if (ldi_ioctl(hdl, I8XX_REM_GTT, (intptr_t)&gttseg, FKIOCTL,
891 	    kcred, &rval))
892 		return (-1);
893 
894 	return (0);
895 }
896 
/*
 * lyr_set_gart_addr()
 *
 * Description:
 *	This function puts the gart table physical address in the
 * 	gart base register.
 *	Please refer to gart and gtt table base register format for
 *	gart base register format in agpdefs.h.
 *
 * Arguments:
 * 	phy_base	The base physical address of gart table
 *	agp_regdev	AGP devices registration struct pointer
 *
 * Returns:
 * 	0		success
 * 	-1		failed
 *
 */

int
lyr_set_gart_addr(uint64_t phy_base, agp_registered_dev_t *agp_regdev)
{
	amd64_gart_dev_list_t	*gart_list;
	ldi_handle_t		hdl;
	int			err = 0;

	ASSERT(agp_regdev);
	switch (agp_regdev->agprd_arctype) {
	case ARC_IGD810:
	{
		uint32_t base;

		/* i810: gtt base is written through the master device */
		ASSERT((phy_base & ~I810_POINTER_MASK) == 0);
		base = (uint32_t)phy_base;

		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, I810_SET_GTT_BASE,
		    (intptr_t)&base, FKIOCTL, kcred, 0);
		break;
	}
	case ARC_INTELAGP:
	{
		uint32_t addr;
		addr = (uint32_t)phy_base;

		/* Intel AGP: gatt base lives on the target (bridge) device */
		ASSERT((phy_base & ~GTT_POINTER_MASK) == 0);
		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, AGP_TARGET_SET_GATTADDR,
		    (intptr_t)&addr, FKIOCTL, kcred, 0);
		break;
	}
	case ARC_AMD64AGP:
	{
		uint32_t addr;

		ASSERT((phy_base & ~AMD64_POINTER_MASK) == 0);
		/* amd64 stores the base shifted into the GART base field */
		addr = (uint32_t)((phy_base >> AMD64_GARTBASE_SHIFT)
		    & AMD64_GARTBASE_MASK);

		/* Every on-cpu gart is given the same table address */
		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
		    gart_list;
		    gart_list = gart_list->next) {
			hdl = gart_list->gart_devhdl;
			if (ldi_ioctl(hdl, AMD64_SET_GART_ADDR,
			    (intptr_t)&addr, FKIOCTL, kcred, 0)) {
				err = -1;
				break;
			}
		}
		break;
	}
	default:
		err = -1;
	}

	if (err)
		return (-1);

	return (0);
}
977 
978 int
979 lyr_set_agp_cmd(uint32_t cmd, agp_registered_dev_t *agp_regdev)
980 {
981 	ldi_handle_t hdl;
982 	uint32_t command;
983 
984 	ASSERT(agp_regdev);
985 	command = cmd;
986 	hdl = agp_regdev->agprd_targethdl;
987 	if (ldi_ioctl(hdl, AGP_TARGET_SETCMD,
988 	    (intptr_t)&command, FKIOCTL, kcred, 0))
989 		return (-1);
990 	hdl = agp_regdev->agprd_masterhdl;
991 	if (ldi_ioctl(hdl, AGP_MASTER_SETCMD,
992 	    (intptr_t)&command, FKIOCTL, kcred, 0))
993 		return (-1);
994 
995 	return (0);
996 }
997 
998 int
999 lyr_config_devices(agp_registered_dev_t *agp_regdev)
1000 {
1001 	amd64_gart_dev_list_t	*gart_list;
1002 	ldi_handle_t		hdl;
1003 	int			rc = 0;
1004 
1005 	ASSERT(agp_regdev);
1006 	switch (agp_regdev->agprd_arctype) {
1007 	case ARC_IGD830:
1008 	case ARC_IGD810:
1009 		break;
1010 	case ARC_INTELAGP:
1011 	{
1012 		hdl = agp_regdev->agprd_targethdl;
1013 		rc = ldi_ioctl(hdl, AGP_TARGET_CONFIGURE,
1014 		    0, FKIOCTL, kcred, 0);
1015 		break;
1016 	}
1017 	case ARC_AMD64AGP:
1018 	{
1019 		/*
1020 		 * BIOS always shadow registers such like Aperture Base
1021 		 * register, Aperture Size Register from the AGP bridge
1022 		 * to the AMD64 CPU host bridge. If future BIOSes are broken
1023 		 * in this regard, we may need to shadow these registers
1024 		 * in driver.
1025 		 */
1026 
1027 		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1028 		    gart_list;
1029 		    gart_list = gart_list->next) {
1030 			hdl = gart_list->gart_devhdl;
1031 			if (ldi_ioctl(hdl, AMD64_CONFIGURE,
1032 			    0, FKIOCTL, kcred, 0)) {
1033 				rc = -1;
1034 				break;
1035 			}
1036 		}
1037 		break;
1038 	}
1039 	default:
1040 		rc = -1;
1041 	}
1042 
1043 	if (rc)
1044 		return (-1);
1045 
1046 	return (0);
1047 }
1048 
1049 int
1050 lyr_unconfig_devices(agp_registered_dev_t *agp_regdev)
1051 {
1052 	amd64_gart_dev_list_t	*gart_list;
1053 	ldi_handle_t		hdl;
1054 	int			rc = 0;
1055 
1056 	ASSERT(agp_regdev);
1057 	switch (agp_regdev->agprd_arctype) {
1058 	case ARC_IGD830:
1059 	case ARC_IGD810:
1060 	{
1061 		hdl = agp_regdev->agprd_masterhdl;
1062 		rc = ldi_ioctl(hdl, I8XX_UNCONFIG, 0, FKIOCTL, kcred, 0);
1063 		break;
1064 	}
1065 	case ARC_INTELAGP:
1066 	{
1067 		hdl = agp_regdev->agprd_targethdl;
1068 		rc = ldi_ioctl(hdl, AGP_TARGET_UNCONFIG,
1069 		    0, FKIOCTL, kcred, 0);
1070 		break;
1071 	}
1072 	case ARC_AMD64AGP:
1073 	{
1074 		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1075 		    gart_list; gart_list = gart_list->next) {
1076 			hdl = gart_list->gart_devhdl;
1077 			if (ldi_ioctl(hdl, AMD64_UNCONFIG,
1078 			    0, FKIOCTL, kcred, 0)) {
1079 				rc = -1;
1080 				break;
1081 			}
1082 		}
1083 		break;
1084 	}
1085 	default:
1086 		rc = -1;
1087 	}
1088 
1089 	if (rc)
1090 		return (-1);
1091 
1092 	return (0);
1093 }
1094 
1095 /*
1096  * lyr_flush_gart_cache()
1097  *
1098  * Description:
1099  * 	This function flushes the GART translation look-aside buffer. All
1100  * 	GART translation caches will be flushed after this operation.
1101  *
1102  * Arguments:
1103  *	agp_regdev	AGP devices struct pointer
1104  */
1105 void
1106 lyr_flush_gart_cache(agp_registered_dev_t *agp_regdev)
1107 {
1108 	amd64_gart_dev_list_t	*gart_list;
1109 	ldi_handle_t		hdl;
1110 
1111 	ASSERT(agp_regdev);
1112 	if (agp_regdev->agprd_arctype == ARC_AMD64AGP) {
1113 		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1114 		    gart_list; gart_list = gart_list->next) {
1115 			hdl = gart_list->gart_devhdl;
1116 			(void) ldi_ioctl(hdl, AMD64_FLUSH_GTLB,
1117 			    0, FKIOCTL, kcred, 0);
1118 		}
1119 	} else if (agp_regdev->agprd_arctype == ARC_INTELAGP) {
1120 		hdl = agp_regdev->agprd_targethdl;
1121 		(void) ldi_ioctl(hdl, AGP_TARGET_FLUSH_GTLB, 0,
1122 		    FKIOCTL, kcred, 0);
1123 	}
1124 }
1125 
1126 /*
1127  * get_max_pages()
1128  *
1129  * Description:
1130  * 	This function compute the total pages allowed for agp aperture
1131  *	based on the ammount of physical pages.
1132  * 	The algorithm is: compare the aperture size with 1/4 of total
1133  *	physical pages, and use the smaller one to for the max available
1134  * 	pages.
1135  *
1136  * Arguments:
1137  * 	aper_size	system agp aperture size (in MB)
1138  *
1139  * Returns:
1140  * 	The max possible number of agp memory pages available to users
1141  */
1142 static uint32_t
1143 get_max_pages(uint32_t aper_size)
1144 {
1145 	uint32_t i, j;
1146 
1147 	ASSERT(aper_size <= MAXAPERMEGAS);
1148 
1149 	i = AGP_MB2PAGES(aper_size);
1150 	j = (physmem >> 2);
1151 
1152 	return ((i < j) ? i : j);
1153 }
1154 
1155 /*
1156  * agp_fill_empty_keyent()
1157  *
1158  * Description:
1159  * 	This function finds a empty key table slot and
1160  * 	fills it with a new entity.
1161  *
1162  * Arguments:
1163  * 	softsate	driver soft state pointer
1164  * 	entryp		new entity data pointer
1165  *
1166  * Returns:
1167  * 	NULL	no key table slot available
1168  * 	entryp	the new entity slot pointer
1169  */
1170 static keytable_ent_t *
1171 agp_fill_empty_keyent(agpgart_softstate_t *softstate, keytable_ent_t *entryp)
1172 {
1173 	int key;
1174 	keytable_ent_t *newentryp;
1175 
1176 	ASSERT(softstate);
1177 	ASSERT(entryp);
1178 	ASSERT(entryp->kte_memhdl);
1179 	ASSERT(entryp->kte_pfnarray);
1180 	ASSERT(mutex_owned(&softstate->asoft_instmutex));
1181 
1182 	for (key = 0; key < AGP_MAXKEYS; key++) {
1183 		newentryp = &softstate->asoft_table[key];
1184 		if (newentryp->kte_memhdl == NULL) {
1185 			break;
1186 		}
1187 	}
1188 
1189 	if (key >= AGP_MAXKEYS) {
1190 		AGPDB_PRINT2((CE_WARN,
1191 		    "agp_fill_empty_keyent: key table exhausted"));
1192 		return (NULL);
1193 	}
1194 
1195 	ASSERT(newentryp->kte_pfnarray == NULL);
1196 	bcopy(entryp, newentryp, sizeof (keytable_ent_t));
1197 	newentryp->kte_key = key;
1198 
1199 	return (newentryp);
1200 }
1201 
1202 /*
1203  * agp_find_bound_keyent()
1204  *
1205  * Description:
1206  * 	This function finds the key table entity by agp aperture page offset.
1207  * 	Every keytable entity will have an agp aperture range after the binding
1208  *	operation.
1209  *
1210  * Arguments:
1211  * 	softsate	driver soft state pointer
1212  * 	pg_offset	agp aperture page offset
1213  *
1214  * Returns:
1215  * 	NULL		no such keytable entity
1216  * 	pointer		key table entity pointer found
1217  */
1218 static keytable_ent_t *
1219 agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset)
1220 {
1221 	int keycount;
1222 	keytable_ent_t *entryp;
1223 
1224 	ASSERT(softstate);
1225 	ASSERT(mutex_owned(&softstate->asoft_instmutex));
1226 
1227 	for (keycount = 0; keycount < AGP_MAXKEYS; keycount++) {
1228 		entryp = &softstate->asoft_table[keycount];
1229 		if (entryp->kte_bound == 0) {
1230 			continue;
1231 		}
1232 
1233 		if (pg_offset < entryp->kte_pgoff)
1234 			continue;
1235 		if (pg_offset >= (entryp->kte_pgoff + entryp->kte_pages))
1236 			continue;
1237 
1238 		ASSERT(entryp->kte_memhdl);
1239 		ASSERT(entryp->kte_pfnarray);
1240 
1241 		return (entryp);
1242 	}
1243 
1244 	return (NULL);
1245 }
1246 
1247 /*
1248  * agp_check_off()
1249  *
1250  * Description:
1251  * 	This function checks whether an AGP aperture range to be bound
1252  *	overlaps with AGP offset already bound.
1253  *
1254  * Arguments:
1255  *	entryp		key table start entry pointer
1256  * 	pg_start	AGP range start page offset
1257  *	pg_num		pages number to be bound
1258  *
1259  * Returns:
1260  *	0		Does not overlap
1261  *	-1		Overlaps
1262  */
1263 
1264 static int
1265 agp_check_off(keytable_ent_t *entryp, uint32_t pg_start, uint32_t pg_num)
1266 {
1267 	int key;
1268 	uint64_t pg_end;
1269 	uint64_t kpg_end;
1270 
1271 	ASSERT(entryp);
1272 
1273 	pg_end = pg_start + pg_num;
1274 	for (key = 0; key < AGP_MAXKEYS; key++) {
1275 		if (!entryp[key].kte_bound)
1276 			continue;
1277 
1278 		kpg_end = entryp[key].kte_pgoff + entryp[key].kte_pages;
1279 		if (!((pg_end <= entryp[key].kte_pgoff) ||
1280 		    (pg_start >= kpg_end)))
1281 			break;
1282 	}
1283 
1284 	if (key == AGP_MAXKEYS)
1285 		return (0);
1286 	else
1287 		return (-1);
1288 }
1289 
/*
 * is_controlling_proc()
 *
 * Description:
 * 	This function checks whether the calling process is the one that
 * 	currently has the gart acquired (the controlling process).
 *
 * Arguments:
 * 	st	driver soft state pointer
 *
 * Returns:
 * 	-1	gart not acquired, or caller is not the acquiring process
 * 	0	caller is the controlling process
 */
static int
is_controlling_proc(agpgart_softstate_t *st)
{
	ASSERT(st);

	if (!st->asoft_acquired) {
		/*
		 * NOTE(review): the messages below name specific ioctl
		 * entry points, but this helper looks generic -- presumably
		 * it is shared by several ioctl paths; confirm with callers.
		 */
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_setup: gart not acquired"));
		return (-1);
	}
	/* only the pid recorded at acquire time may control the gart */
	if (st->asoft_curpid != ddi_get_pid()) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_release: not  controlling process"));
		return (-1);
	}

	return (0);
}
1308 
/* Drop controlling-process state: no process owns the gart afterwards. */
static void release_control(agpgart_softstate_t *st)
{
	st->asoft_curpid = 0;
	st->asoft_acquired = 0;
}
1314 
/* Record the calling process as the gart's controlling process. */
static void acquire_control(agpgart_softstate_t *st)
{
	st->asoft_curpid = ddi_get_pid();
	st->asoft_acquired = 1;
}
1320 
1321 /*
1322  * agp_remove_from_gart()
1323  *
1324  * Description:
1325  * 	This function fills the gart table entries by a given page
1326  * 	frame number array and setup the agp aperture page to physical
1327  * 	memory page translation.
1328  * Arguments:
1329  * 	pg_offset	Starting aperture page to be bound
1330  * 	entries		the number of pages to be bound
1331  * 	acc_hdl		GART table dma memory acc handle
1332  * 	tablep		GART table kernel virtual address
1333  */
1334 static void
1335 agp_remove_from_gart(
1336     uint32_t pg_offset,
1337     uint32_t entries,
1338     ddi_dma_handle_t dma_hdl,
1339     uint32_t *tablep)
1340 {
1341 	uint32_t items = 0;
1342 	uint32_t *entryp;
1343 
1344 	entryp = tablep + pg_offset;
1345 	while (items < entries) {
1346 		*(entryp + items) = 0;
1347 		items++;
1348 	}
1349 	(void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
1350 	    entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);
1351 }
1352 
1353 /*
1354  * agp_unbind_key()
1355  *
1356  * Description:
1357  * 	This function unbinds AGP memory from the gart table. It will clear
1358  * 	all the gart entries related to this agp memory.
1359  *
1360  * Arguments:
1361  * 	softstate		driver soft state pointer
1362  * 	entryp			key table entity pointer
1363  *
1364  * Returns:
1365  * 	EINVAL		invalid key table entity pointer
1366  * 	0		success
1367  *
1368  */
1369 static int
1370 agp_unbind_key(agpgart_softstate_t *softstate, keytable_ent_t *entryp)
1371 {
1372 	int retval = 0;
1373 
1374 	ASSERT(entryp);
1375 	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
1376 
1377 	if (!entryp->kte_bound) {
1378 		AGPDB_PRINT2((CE_WARN,
1379 		    "agp_unbind_key: key = 0x%x, not bound",
1380 		    entryp->kte_key));
1381 		return (EINVAL);
1382 	}
1383 	if (entryp->kte_refcnt) {
1384 		AGPDB_PRINT2((CE_WARN,
1385 		    "agp_unbind_key: memory is exported to users"));
1386 		return (EINVAL);
1387 	}
1388 
1389 	ASSERT((entryp->kte_pgoff + entryp->kte_pages) <=
1390 	    AGP_MB2PAGES(softstate->asoft_info.agpki_apersize));
1391 	ASSERT((softstate->asoft_devreg.agprd_arctype != ARC_UNKNOWN));
1392 
1393 	switch (softstate->asoft_devreg.agprd_arctype) {
1394 	case ARC_IGD810:
1395 	case ARC_IGD830:
1396 		retval = lyr_i8xx_remove_from_gtt(
1397 		    entryp->kte_pgoff, entryp->kte_pages,
1398 		    &softstate->asoft_devreg);
1399 		if (retval) {
1400 			AGPDB_PRINT2((CE_WARN,
1401 			    "agp_unbind_key: Key = 0x%x, clear table error",
1402 			    entryp->kte_key));
1403 			return (EIO);
1404 		}
1405 		break;
1406 	case ARC_INTELAGP:
1407 	case ARC_AMD64AGP:
1408 		agp_remove_from_gart(entryp->kte_pgoff,
1409 		    entryp->kte_pages,
1410 		    softstate->gart_dma_handle,
1411 		    (uint32_t *)softstate->gart_vbase);
1412 		/* Flush GTLB table */
1413 		lyr_flush_gart_cache(&softstate->asoft_devreg);
1414 
1415 		break;
1416 	}
1417 
1418 	entryp->kte_bound = 0;
1419 
1420 	return (0);
1421 }
1422 
1423 /*
1424  * agp_dealloc_kmem()
1425  *
1426  * Description:
1427  * 	This function deallocates dma memory resources for userland
1428  * 	applications.
1429  *
1430  * Arguments:
1431  * 	entryp		keytable entity pointer
1432  */
1433 static void
1434 agp_dealloc_kmem(keytable_ent_t *entryp)
1435 {
1436 	kmem_free(entryp->kte_pfnarray, sizeof (pfn_t) * entryp->kte_pages);
1437 	entryp->kte_pfnarray = NULL;
1438 
1439 	(void) ddi_dma_unbind_handle(KMEMP(entryp->kte_memhdl)->kmem_handle);
1440 	KMEMP(entryp->kte_memhdl)->kmem_cookies_num = 0;
1441 	ddi_dma_mem_free(&KMEMP(entryp->kte_memhdl)->kmem_acchdl);
1442 	KMEMP(entryp->kte_memhdl)->kmem_acchdl = NULL;
1443 	KMEMP(entryp->kte_memhdl)->kmem_reallen = 0;
1444 	KMEMP(entryp->kte_memhdl)->kmem_kvaddr = NULL;
1445 
1446 	ddi_dma_free_handle(&(KMEMP(entryp->kte_memhdl)->kmem_handle));
1447 	KMEMP(entryp->kte_memhdl)->kmem_handle = NULL;
1448 
1449 	kmem_free(entryp->kte_memhdl, sizeof (agp_kmem_handle_t));
1450 	entryp->kte_memhdl = NULL;
1451 }
1452 
1453 /*
1454  * agp_dealloc_pmem()
1455  *
1456  * Description:
1457  * 	This function deallocates memory resource for direct mapping to
1458  * 	userland applications.
1459  *
1460  * Arguments:
1461  * 	entryp		key table entity pointer
1462  *
1463  */
1464 static void
1465 agp_dealloc_pmem(keytable_ent_t *entryp)
1466 {
1467 	devmap_pmem_free(PMEMP(entryp->kte_memhdl)->pmem_cookie);
1468 	PMEMP(entryp->kte_memhdl)->pmem_cookie = NULL;
1469 	kmem_free(entryp->kte_memhdl, sizeof (agp_pmem_handle_t));
1470 	entryp->kte_memhdl = NULL;
1471 
1472 	/* free the page frame number array */
1473 	kmem_free(entryp->kte_pfnarray, sizeof (pfn_t) * entryp->kte_pages);
1474 	entryp->kte_pfnarray = NULL;
1475 }
1476 
1477 /*
1478  * agp_dealloc_mem()
1479  *
1480  * Description:
1481  * 	This function deallocates physical memory resources allocated for
1482  *	userland applications.
1483  *
1484  * Arguments:
1485  * 	st		driver soft state pointer
1486  * 	entryp		key table entity pointer
1487  *
1488  * Returns:
1489  * 	-1		not a valid memory type or the memory is mapped by
1490  * 			user area applications
1491  * 	0		success
1492  */
1493 static int
1494 agp_dealloc_mem(agpgart_softstate_t *st, keytable_ent_t	*entryp)
1495 {
1496 
1497 	ASSERT(entryp);
1498 	ASSERT(st);
1499 	ASSERT(entryp->kte_memhdl);
1500 	ASSERT(mutex_owned(&st->asoft_instmutex));
1501 
1502 	/* auto unbind here */
1503 	if (entryp->kte_bound && !entryp->kte_refcnt) {
1504 		AGPDB_PRINT2((CE_WARN,
1505 		    "agp_dealloc_mem: key=0x%x, auto unbind",
1506 		    entryp->kte_key));
1507 
1508 		/*
1509 		 * agp_dealloc_mem may be called indirectly by agp_detach.
1510 		 * In the agp_detach function, agpgart_close is already
1511 		 * called which will free the gart table. agp_unbind_key
1512 		 * will panic if no valid gart table exists. So test if
1513 		 * gart table exsits here.
1514 		 */
1515 		if (st->asoft_opened)
1516 			(void) agp_unbind_key(st, entryp);
1517 	}
1518 	if (entryp->kte_refcnt) {
1519 		AGPDB_PRINT2((CE_WARN,
1520 		    "agp_dealloc_pmem: memory is exported to users"));
1521 		return (-1);
1522 	}
1523 
1524 	switch (entryp->kte_type) {
1525 	case AGP_NORMAL:
1526 		agp_dealloc_pmem(entryp);
1527 		break;
1528 	case AGP_PHYSICAL:
1529 		agp_dealloc_kmem(entryp);
1530 		break;
1531 	default:
1532 		return (-1);
1533 	}
1534 
1535 	return (0);
1536 }
1537 
1538 /*
1539  * agp_del_allkeys()
1540  *
1541  * Description:
1542  * 	This function calls agp_dealloc_mem to release all the agp memory
1543  *	resource allocated.
1544  *
1545  * Arguments:
1546  * 	softsate	driver soft state pointer
1547  * Returns:
1548  * 	-1		can not free all agp memory
1549  * 	0		success
1550  *
1551  */
1552 static int
1553 agp_del_allkeys(agpgart_softstate_t *softstate)
1554 {
1555 	int key;
1556 	int ret = 0;
1557 
1558 	ASSERT(softstate);
1559 	for (key = 0; key < AGP_MAXKEYS; key++) {
1560 		if (softstate->asoft_table[key].kte_memhdl != NULL) {
1561 			/*
1562 			 * Check if we can free agp memory now.
1563 			 * If agp memory is exported to user
1564 			 * applications, agp_dealloc_mem will fail.
1565 			 */
1566 			if (agp_dealloc_mem(softstate,
1567 			    &softstate->asoft_table[key]))
1568 				ret = -1;
1569 		}
1570 	}
1571 
1572 	return (ret);
1573 }
1574 
1575 /*
1576  * pfn2gartentry()
1577  *
1578  * Description:
1579  *	This function converts a physical address to GART entry.
1580  *	For AMD64, hardware only support addresses below 40bits,
1581  *	about 1024G physical address, so the largest pfn
1582  *	number is below 28 bits. Please refer to GART and GTT entry
1583  *	format table in agpdefs.h for entry format. Intel IGD only
1584  * 	only supports GTT entry below 1G. Intel AGP only supports
1585  * 	GART entry below 4G.
1586  *
1587  * Arguments:
1588  * 	arc_type		system agp arc type
1589  * 	pfn			page frame number
1590  * 	itemv			the entry item to be returned
1591  * Returns:
1592  * 	-1			not a invalid page frame
1593  * 	0			conversion success
1594  */
1595 static int
1596 pfn2gartentry(agp_arc_type_t arc_type, pfn_t pfn, uint32_t *itemv)
1597 {
1598 	uint64_t paddr;
1599 
1600 	paddr = pfn<<AGP_PAGE_SHIFT;
1601 
1602 	switch (arc_type) {
1603 	case ARC_INTELAGP:
1604 	{
1605 		/* Only support 32-bit hardware address */
1606 		if ((paddr & ~AGP_INTEL_POINTER_MASK) != 0) {
1607 			AGPDB_PRINT2((CE_WARN,
1608 			    "INTEL AGP Hardware only support 32 bits"));
1609 			return (-1);
1610 		}
1611 		*itemv =  (pfn << AGP_PAGE_SHIFT) | AGP_ENTRY_VALID;
1612 
1613 		break;
1614 	}
1615 	case ARC_AMD64AGP:
1616 	{
1617 		uint32_t value1, value2;
1618 		/* Physaddr should not exceed 40-bit */
1619 		if ((paddr & ~AMD64_POINTER_MASK) != 0) {
1620 			AGPDB_PRINT2((CE_WARN,
1621 			    "AMD64 GART hardware only supoort 40 bits"));
1622 			return (-1);
1623 		}
1624 		value1 = (uint32_t)pfn >> 20;
1625 		value1 <<= 4;
1626 		value2 = (uint32_t)pfn << 12;
1627 
1628 		*itemv = value1 | value2 | AMD64_ENTRY_VALID;
1629 		break;
1630 	}
1631 	case ARC_IGD810:
1632 		if ((paddr & ~I810_POINTER_MASK) != 0) {
1633 			AGPDB_PRINT2((CE_WARN,
1634 			    "Intel i810 only support 30 bits"));
1635 			return (-1);
1636 		}
1637 		break;
1638 
1639 	case ARC_IGD830:
1640 		if ((paddr & ~GTT_POINTER_MASK) != 0) {
1641 			AGPDB_PRINT2((CE_WARN,
1642 			    "Intel IGD only support 32 bits"));
1643 			return (-1);
1644 		}
1645 		break;
1646 	default:
1647 		AGPDB_PRINT2((CE_WARN,
1648 		    "pfn2gartentry: arc type = %d, not support", arc_type));
1649 		return (-1);
1650 	}
1651 	return (0);
1652 }
1653 
1654 /*
1655  * Check allocated physical pages validity, only called in DEBUG
1656  * mode.
1657  */
1658 static int
1659 agp_check_pfns(agp_arc_type_t arc_type, pfn_t *pfnarray, int items)
1660 {
1661 	int count;
1662 	uint32_t ret;
1663 
1664 	for (count = 0; count < items; count++) {
1665 		if (pfn2gartentry(arc_type, pfnarray[count], &ret))
1666 			break;
1667 	}
1668 	if (count < items)
1669 		return (-1);
1670 	else
1671 		return (0);
1672 }
1673 
1674 /*
1675  * kmem_getpfns()
1676  *
1677  * Description:
1678  * 	This function gets page frame numbers from dma handle.
1679  *
1680  * Arguments:
1681  * 	dma_handle		dma hanle allocated by ddi_dma_alloc_handle
1682  * 	dma_cookip		dma cookie pointer
1683  * 	cookies_num		cookies number
1684  * 	pfnarray		array to store page frames
1685  *
1686  * Returns:
1687  *	0		success
1688  */
1689 static int
1690 kmem_getpfns(
1691     ddi_dma_handle_t dma_handle,
1692     ddi_dma_cookie_t *dma_cookiep,
1693     int cookies_num,
1694     pfn_t *pfnarray)
1695 {
1696 	int	num_cookies;
1697 	int	index = 0;
1698 
1699 	num_cookies = cookies_num;
1700 
1701 	while (num_cookies > 0) {
1702 		uint64_t ck_startaddr, ck_length, ck_end;
1703 		ck_startaddr = dma_cookiep->dmac_address;
1704 		ck_length = dma_cookiep->dmac_size;
1705 
1706 		ck_end = ck_startaddr + ck_length;
1707 		while (ck_startaddr < ck_end) {
1708 			pfnarray[index] = (pfn_t)ck_startaddr >> AGP_PAGE_SHIFT;
1709 			ck_startaddr += AGP_PAGE_SIZE;
1710 			index++;
1711 		}
1712 
1713 		num_cookies--;
1714 		if (num_cookies > 0) {
1715 			ddi_dma_nextcookie(dma_handle, dma_cookiep);
1716 		}
1717 	}
1718 
1719 	return (0);
1720 }
1721 
1722 static int
1723 copyinfo(agpgart_softstate_t *softstate, agp_info_t *info)
1724 {
1725 	switch (softstate->asoft_devreg.agprd_arctype) {
1726 	case ARC_IGD810:
1727 	case ARC_IGD830:
1728 		info->agpi_version.agpv_major = 0;
1729 		info->agpi_version.agpv_minor = 0;
1730 		info->agpi_devid = softstate->asoft_info.agpki_mdevid;
1731 		info->agpi_mode = 0;
1732 		break;
1733 	case ARC_INTELAGP:
1734 	case ARC_AMD64AGP:
1735 		info->agpi_version = softstate->asoft_info.agpki_tver;
1736 		info->agpi_devid = softstate->asoft_info.agpki_tdevid;
1737 		info->agpi_mode = softstate->asoft_info.agpki_tstatus;
1738 		break;
1739 	default:
1740 		AGPDB_PRINT2((CE_WARN, "copyinfo: UNKNOW ARC"));
1741 		return (-1);
1742 	}
1743 	/*
1744 	 * 64bit->32bit conversion possible
1745 	 */
1746 	info->agpi_aperbase = softstate->asoft_info.agpki_aperbase;
1747 	info->agpi_apersize = softstate->asoft_info.agpki_apersize;
1748 	info->agpi_pgtotal = softstate->asoft_pgtotal;
1749 	info->agpi_pgsystem = info->agpi_pgtotal;
1750 	info->agpi_pgused = softstate->asoft_pgused;
1751 
1752 	return (0);
1753 }
1754 
1755 static uint32_t
1756 agp_v2_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode)
1757 {
1758 	uint32_t cmd;
1759 	int rq, sba, over4g, fw, rate;
1760 
1761 	/*
1762 	 * tstatus: target device status
1763 	 * mstatus: master device status
1764 	 * mode: the agp mode to be sent
1765 	 */
1766 
1767 	/*
1768 	 * RQ - Request Queue size
1769 	 * set RQ to the min of mode and tstatus
1770 	 * if mode set a RQ larger than hardware can support,
1771 	 * use the max RQ which hardware can support.
1772 	 * tstatus & AGPSTAT_RQ_MASK is the max RQ hardware can support
1773 	 * Corelogic will enqueue agp transaction
1774 	 */
1775 	rq = mode & AGPSTAT_RQ_MASK;
1776 	if ((tstatus & AGPSTAT_RQ_MASK) < rq)
1777 		rq = tstatus & AGPSTAT_RQ_MASK;
1778 
1779 	/*
1780 	 * SBA - Sideband Addressing
1781 	 *
1782 	 * Sideband Addressing provides an additional bus to pass requests
1783 	 * (address and command) to the target from the master.
1784 	 *
1785 	 * set SBA if all three support it
1786 	 */
1787 	sba = (tstatus & AGPSTAT_SBA) & (mstatus & AGPSTAT_SBA)
1788 	    & (mode & AGPSTAT_SBA);
1789 
1790 	/* set OVER4G  if all three support it */
1791 	over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G)
1792 	    & (mode & AGPSTAT_OVER4G);
1793 
1794 	/*
1795 	 * FW - fast write
1796 	 *
1797 	 * acceleration of memory write transactions from the corelogic to the
1798 	 * A.G.P. master device acting like a PCI target.
1799 	 *
1800 	 * set FW if all three support it
1801 	 */
1802 	fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW)
1803 	    & (mode & AGPSTAT_FW);
1804 
1805 	/*
1806 	 * figure out the max rate
1807 	 * AGP v2 support: 4X, 2X, 1X speed
1808 	 * status bit		meaning
1809 	 * ---------------------------------------------
1810 	 * 7:3			others
1811 	 * 3			0 stand for V2 support
1812 	 * 0:2			001:1X, 010:2X, 100:4X
1813 	 * ----------------------------------------------
1814 	 */
1815 	rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK)
1816 	    & (mode & AGPSTAT_RATE_MASK);
1817 	if (rate & AGP2_RATE_4X)
1818 		rate = AGP2_RATE_4X;
1819 	else if (rate & AGP2_RATE_2X)
1820 		rate = AGP2_RATE_2X;
1821 	else
1822 		rate = AGP2_RATE_1X;
1823 
1824 	cmd = rq | sba | over4g | fw | rate;
1825 	/* enable agp mode */
1826 	cmd |= AGPCMD_AGPEN;
1827 
1828 	return (cmd);
1829 }
1830 
1831 static uint32_t
1832 agp_v3_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode)
1833 {
1834 	uint32_t cmd = 0;
1835 	uint32_t rq, arqsz, cal, sba, over4g, fw, rate;
1836 
1837 	/*
1838 	 * tstatus: target device status
1839 	 * mstatus: master device status
1840 	 * mode: the agp mode to be set
1841 	 */
1842 
1843 	/*
1844 	 * RQ - Request Queue size
1845 	 * Set RQ to the min of mode and tstatus
1846 	 * If mode set a RQ larger than hardware can support,
1847 	 * use the max RQ which hardware can support.
1848 	 * tstatus & AGPSTAT_RQ_MASK is the max RQ hardware can support
1849 	 * Corelogic will enqueue agp transaction;
1850 	 */
1851 	rq = mode & AGPSTAT_RQ_MASK;
1852 	if ((tstatus & AGPSTAT_RQ_MASK) < rq)
1853 		rq = tstatus & AGPSTAT_RQ_MASK;
1854 
1855 	/*
1856 	 * ARQSZ - Asynchronous Request Queue size
1857 	 * Set the value equal to tstatus.
1858 	 * Don't allow the mode register to override values
1859 	 */
1860 	arqsz = tstatus & AGPSTAT_ARQSZ_MASK;
1861 
1862 	/*
1863 	 * CAL - Calibration cycle
1864 	 * Set to the min of tstatus and mstatus
1865 	 * Don't allow override by mode register
1866 	 */
1867 	cal = tstatus & AGPSTAT_CAL_MASK;
1868 	if ((mstatus & AGPSTAT_CAL_MASK) < cal)
1869 		cal = mstatus & AGPSTAT_CAL_MASK;
1870 
1871 	/*
1872 	 * SBA - Sideband Addressing
1873 	 *
1874 	 * Sideband Addressing provides an additional bus to pass requests
1875 	 * (address and command) to the target from the master.
1876 	 *
1877 	 * SBA in agp v3.0 must be set
1878 	 */
1879 	sba = AGPCMD_SBAEN;
1880 
1881 	/* GART64B is not set since no hardware supports it now */
1882 
1883 	/* Set OVER4G if all three support it */
1884 	over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G)
1885 	    & (mode & AGPSTAT_OVER4G);
1886 
1887 	/*
1888 	 * FW - fast write
1889 	 *
1890 	 * Acceleration of memory write transactions from the corelogic to the
1891 	 * A.G.P. master device acting like a PCI target.
1892 	 *
1893 	 * Always set FW in AGP 3.0
1894 	 */
1895 	fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW)
1896 	    & (mode & AGPSTAT_FW);
1897 
1898 	/*
1899 	 * Figure out the max rate
1900 	 *
1901 	 * AGP v3 support: 8X, 4X speed
1902 	 *
1903 	 * status bit		meaning
1904 	 * ---------------------------------------------
1905 	 * 7:3			others
1906 	 * 3			1 stand for V3 support
1907 	 * 0:2			001:4X, 010:8X, 011:4X,8X
1908 	 * ----------------------------------------------
1909 	 */
1910 	rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK)
1911 	    & (mode & AGPSTAT_RATE_MASK);
1912 	if (rate & AGP3_RATE_8X)
1913 		rate = AGP3_RATE_8X;
1914 	else
1915 		rate = AGP3_RATE_4X;
1916 
1917 	cmd = rq | arqsz | cal | sba | over4g | fw | rate;
1918 	/* Enable AGP mode */
1919 	cmd |= AGPCMD_AGPEN;
1920 
1921 	return (cmd);
1922 }
1923 
1924 static int
1925 agp_setup(agpgart_softstate_t *softstate, uint32_t mode)
1926 {
1927 	uint32_t tstatus, mstatus;
1928 	uint32_t agp_mode;
1929 
1930 	tstatus = softstate->asoft_info.agpki_tstatus;
1931 	mstatus = softstate->asoft_info.agpki_mstatus;
1932 
1933 	/*
1934 	 * There are three kinds of AGP mode. AGP mode 1.0, 2.0, 3.0
1935 	 * AGP mode 2.0 is fully compatible with AGP mode 1.0, so we
1936 	 * only check 2.0 and 3.0 mode. AGP 3.0 device can work in
1937 	 * two AGP 2.0 or AGP 3.0 mode. By checking AGP status register,
1938 	 * we can get which mode it is working at. The working mode of
1939 	 * AGP master and AGP target must be consistent. That is, both
1940 	 * of them must work on AGP 3.0 mode or AGP 2.0 mode.
1941 	 */
1942 	if ((softstate->asoft_info.agpki_tver.agpv_major == 3) &&
1943 	    (tstatus & AGPSTAT_MODE3)) {
1944 		/* Master device should be 3.0 mode, too */
1945 		if ((softstate->asoft_info.agpki_mver.agpv_major != 3) ||
1946 		    ((mstatus & AGPSTAT_MODE3) == 0))
1947 			return (EIO);
1948 
1949 		agp_mode = agp_v3_setup(tstatus, mstatus, mode);
1950 		/* Write to the AGPCMD register of target and master devices */
1951 		if (lyr_set_agp_cmd(agp_mode,
1952 		    &softstate->asoft_devreg))
1953 			return (EIO);
1954 
1955 		softstate->asoft_mode = agp_mode;
1956 
1957 		return (0);
1958 	}
1959 
1960 	/*
1961 	 * If agp taget device doesn't work in AGP 3.0 mode,
1962 	 * it must work in AGP 2.0 mode. And make sure
1963 	 * master device work in AGP 2.0 mode too
1964 	 */
1965 	if ((softstate->asoft_info.agpki_mver.agpv_major == 3) &&
1966 	    (mstatus & AGPSTAT_MODE3))
1967 		return (EIO);
1968 
1969 	agp_mode = agp_v2_setup(tstatus, mstatus, mode);
1970 	if (lyr_set_agp_cmd(agp_mode, &softstate->asoft_devreg))
1971 		return (EIO);
1972 	softstate->asoft_mode = agp_mode;
1973 
1974 	return (0);
1975 }
1976 
1977 /*
1978  * agp_alloc_pmem()
1979  *
1980  * Description:
1981  * 	This function allocates physical memory for direct mapping to userland
1982  * 	applications.
1983  *
1984  * Arguments:
1985  * 	softsate	driver soft state pointer
1986  * 	length		memory size
1987  * 	type		AGP_NORMAL: normal agp memory, AGP_PHISYCAL: specical
1988  *			memory type for intel i810 IGD
1989  *
1990  * Returns:
1991  * 	entryp		new key table entity pointer
1992  * 	NULL		no key table slot available
1993  */
1994 static keytable_ent_t *
1995 agp_alloc_pmem(agpgart_softstate_t *softstate, size_t length, int type)
1996 {
1997 	keytable_ent_t	keyentry;
1998 	keytable_ent_t	*entryp;
1999 
2000 	ASSERT(AGP_ALIGNED(length));
2001 	bzero(&keyentry, sizeof (keytable_ent_t));
2002 
2003 	keyentry.kte_pages = AGP_BYTES2PAGES(length);
2004 	keyentry.kte_type = type;
2005 
2006 	keyentry.kte_memhdl =
2007 	    (agp_pmem_handle_t *)kmem_zalloc(sizeof (agp_pmem_handle_t),
2008 	    KM_SLEEP);
2009 
2010 	if (devmap_pmem_alloc(length,
2011 	    PMEM_SLEEP,
2012 	    &PMEMP(keyentry.kte_memhdl)->pmem_cookie) != DDI_SUCCESS)
2013 		goto err1;
2014 
2015 	keyentry.kte_pfnarray = (pfn_t *)kmem_zalloc(sizeof (pfn_t) *
2016 	    keyentry.kte_pages, KM_SLEEP);
2017 
2018 	if (devmap_pmem_getpfns(
2019 	    PMEMP(keyentry.kte_memhdl)->pmem_cookie,
2020 	    0, keyentry.kte_pages, keyentry.kte_pfnarray) != DDI_SUCCESS) {
2021 		AGPDB_PRINT2((CE_WARN,
2022 		    "agp_alloc_pmem: devmap_map_getpfns failed"));
2023 		goto err2;
2024 	}
2025 	ASSERT(!agp_check_pfns(softstate->asoft_devreg.agprd_arctype,
2026 	    keyentry.kte_pfnarray, keyentry.kte_pages));
2027 	entryp = agp_fill_empty_keyent(softstate, &keyentry);
2028 
2029 	if (!entryp) {
2030 		AGPDB_PRINT2((CE_WARN,
2031 		    "agp_alloc_pmem: agp_fill_empty_keyent error"));
2032 		goto err2;
2033 	}
2034 	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
2035 
2036 	return (entryp);
2037 
2038 err2:
2039 	kmem_free(keyentry.kte_pfnarray, sizeof (pfn_t) * keyentry.kte_pages);
2040 	keyentry.kte_pfnarray = NULL;
2041 	devmap_pmem_free(PMEMP(keyentry.kte_memhdl)->pmem_cookie);
2042 	PMEMP(keyentry.kte_memhdl)->pmem_cookie = NULL;
2043 err1:
2044 	kmem_free(keyentry.kte_memhdl, sizeof (agp_pmem_handle_t));
2045 	keyentry.kte_memhdl = NULL;
2046 
2047 	return (NULL);
2048 
2049 }
2050 
2051 /*
2052  * agp_alloc_kmem()
2053  *
2054  * Description:
2055  * 	This function allocates physical memory for userland applications
2056  * 	by ddi interfaces. This function can only be called to allocate
2057  *	small phsyical contiguous pages, usually tens of kilobytes.
2058  *
2059  * Arguments:
2060  * 	softsate	driver soft state pointer
2061  * 	length		memory size
2062  *
2063  * Returns:
2064  * 	entryp		new keytable entity pointer
2065  * 	NULL		no keytable slot available or no physical
2066  *			memory available
2067  */
2068 static keytable_ent_t *
2069 agp_alloc_kmem(agpgart_softstate_t *softstate, size_t length)
2070 {
2071 	keytable_ent_t	keyentry;
2072 	keytable_ent_t	*entryp;
2073 	int		ret;
2074 
2075 	ASSERT(AGP_ALIGNED(length));
2076 
2077 	bzero(&keyentry, sizeof (keytable_ent_t));
2078 
2079 	keyentry.kte_pages = AGP_BYTES2PAGES(length);
2080 	keyentry.kte_type = AGP_PHYSICAL;
2081 
2082 	/*
2083 	 * Set dma_attr_sgllen to assure contiguous physical pages
2084 	 */
2085 	agpgart_dma_attr.dma_attr_sgllen = 1;
2086 
2087 	/* 4k size pages */
2088 	keyentry.kte_memhdl = kmem_zalloc(sizeof (agp_kmem_handle_t), KM_SLEEP);
2089 
2090 	if (ddi_dma_alloc_handle(softstate->asoft_dip,
2091 	    &agpgart_dma_attr,
2092 	    DDI_DMA_SLEEP, NULL,
2093 	    &(KMEMP(keyentry.kte_memhdl)->kmem_handle))) {
2094 		AGPDB_PRINT2((CE_WARN,
2095 		    "agp_alloc_kmem: ddi_dma_allco_hanlde error"));
2096 		goto err4;
2097 	}
2098 
2099 	if ((ret = ddi_dma_mem_alloc(
2100 	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
2101 	    length,
2102 	    &gart_dev_acc_attr,
2103 	    DDI_DMA_CONSISTENT,
2104 	    DDI_DMA_SLEEP, NULL,
2105 	    &KMEMP(keyentry.kte_memhdl)->kmem_kvaddr,
2106 	    &KMEMP(keyentry.kte_memhdl)->kmem_reallen,
2107 	    &KMEMP(keyentry.kte_memhdl)->kmem_acchdl)) != 0) {
2108 		AGPDB_PRINT2((CE_WARN,
2109 		    "agp_alloc_kmem: ddi_dma_mem_alloc error"));
2110 
2111 		goto err3;
2112 	}
2113 
2114 	ret = ddi_dma_addr_bind_handle(
2115 	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
2116 	    NULL,
2117 	    KMEMP(keyentry.kte_memhdl)->kmem_kvaddr,
2118 	    length,
2119 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2120 	    DDI_DMA_SLEEP,
2121 	    NULL,
2122 	    &KMEMP(keyentry.kte_memhdl)->kmem_dcookie,
2123 	    &KMEMP(keyentry.kte_memhdl)->kmem_cookies_num);
2124 
2125 	/*
2126 	 * Even dma_attr_sgllen = 1, ddi_dma_addr_bind_handle may return more
2127 	 * than one cookie, we check this in the if statement.
2128 	 */
2129 
2130 	if ((ret != DDI_DMA_MAPPED) ||
2131 	    (KMEMP(keyentry.kte_memhdl)->kmem_cookies_num != 1)) {
2132 		AGPDB_PRINT2((CE_WARN,
2133 		    "agp_alloc_kmem: can not alloc physical memory properly"));
2134 		goto err2;
2135 	}
2136 
2137 	keyentry.kte_pfnarray = (pfn_t *)kmem_zalloc(sizeof (pfn_t) *
2138 	    keyentry.kte_pages, KM_SLEEP);
2139 
2140 	if (kmem_getpfns(
2141 	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
2142 	    &KMEMP(keyentry.kte_memhdl)->kmem_dcookie,
2143 	    KMEMP(keyentry.kte_memhdl)->kmem_cookies_num,
2144 	    keyentry.kte_pfnarray)) {
2145 		AGPDB_PRINT2((CE_WARN, "agp_alloc_kmem: get pfn array error"));
2146 		goto err1;
2147 	}
2148 
2149 	ASSERT(!agp_check_pfns(softstate->asoft_devreg.agprd_arctype,
2150 	    keyentry.kte_pfnarray, keyentry.kte_pages));
2151 	entryp = agp_fill_empty_keyent(softstate, &keyentry);
2152 	if (!entryp) {
2153 		AGPDB_PRINT2((CE_WARN,
2154 		    "agp_alloc_kmem: agp_fill_empty_keyent error"));
2155 
2156 		goto err1;
2157 	}
2158 	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
2159 
2160 	return (entryp);
2161 
2162 err1:
2163 	kmem_free(keyentry.kte_pfnarray, sizeof (pfn_t) * keyentry.kte_pages);
2164 	keyentry.kte_pfnarray = NULL;
2165 	(void) ddi_dma_unbind_handle(KMEMP(keyentry.kte_memhdl)->kmem_handle);
2166 	KMEMP(keyentry.kte_memhdl)->kmem_cookies_num = 0;
2167 err2:
2168 	ddi_dma_mem_free(&KMEMP(keyentry.kte_memhdl)->kmem_acchdl);
2169 	KMEMP(keyentry.kte_memhdl)->kmem_acchdl = NULL;
2170 	KMEMP(keyentry.kte_memhdl)->kmem_reallen = 0;
2171 	KMEMP(keyentry.kte_memhdl)->kmem_kvaddr = NULL;
2172 err3:
2173 	ddi_dma_free_handle(&(KMEMP(keyentry.kte_memhdl)->kmem_handle));
2174 	KMEMP(keyentry.kte_memhdl)->kmem_handle = NULL;
2175 err4:
2176 	kmem_free(keyentry.kte_memhdl, sizeof (agp_kmem_handle_t));
2177 	keyentry.kte_memhdl = NULL;
2178 	return (NULL);
2179 
2180 }
2181 
2182 /*
2183  * agp_alloc_mem()
2184  *
2185  * Description:
2186  * 	This function allocate physical memory for userland applications,
2187  * 	in order to save kernel virtual space, we use the direct mapping
2188  * 	memory interface if it is available.
2189  *
2190  * Arguments:
2191  * 	st		driver soft state pointer
2192  * 	length		memory size
2193  * 	type		AGP_NORMAL: normal agp memory, AGP_PHISYCAL: specical
2194  *			memory type for intel i810 IGD
2195  *
2196  * Returns:
2197  * 	NULL 	Invalid memory type or can not allocate memory
2198  * 	Keytable entry pointer returned by agp_alloc_kmem or agp_alloc_pmem
2199  */
2200 static keytable_ent_t *
2201 agp_alloc_mem(agpgart_softstate_t *st, size_t length, int type)
2202 {
2203 
2204 	/*
2205 	 * AGP_PHYSICAL type require contiguous physical pages exported
2206 	 * to X drivers, like i810 HW cursor, ARGB cursor. the number of
2207 	 * pages needed is usuallysmall and contiguous, 4K, 16K. So we
2208 	 * use DDI interface to allocated such memory. And X use xsvc
2209 	 * drivers to map this memory into its own address space.
2210 	 */
2211 	ASSERT(st);
2212 
2213 	switch (type) {
2214 	case AGP_NORMAL:
2215 		return (agp_alloc_pmem(st, length, type));
2216 	case AGP_PHYSICAL:
2217 		return (agp_alloc_kmem(st, length));
2218 	default:
2219 		return (NULL);
2220 	}
2221 }
2222 
2223 /*
2224  * free_gart_table()
2225  *
2226  * Description:
2227  * 	This function frees the gart table memory allocated by driver.
2228  * 	Must disable gart table before calling this function.
2229  *
2230  * Arguments:
2231  * 	softstate		driver soft state pointer
2232  *
2233  */
2234 static void
2235 free_gart_table(agpgart_softstate_t *st)
2236 {
2237 
2238 	if (st->gart_dma_handle == NULL)
2239 		return;
2240 
2241 	(void) ddi_dma_unbind_handle(st->gart_dma_handle);
2242 	ddi_dma_mem_free(&st->gart_dma_acc_handle);
2243 	st->gart_dma_acc_handle = NULL;
2244 	ddi_dma_free_handle(&st->gart_dma_handle);
2245 	st->gart_dma_handle = NULL;
2246 	st->gart_vbase = 0;
2247 	st->gart_size = 0;
2248 }
2249 
2250 /*
2251  * alloc_gart_table()
2252  *
2253  * Description:
2254  * 	This function allocates one physical continuous gart table.
2255  * 	INTEL integrated video device except i810 have their special
2256  * 	video bios; No need to allocate gart table for them.
2257  *
2258  * Arguments:
2259  * 	st		driver soft state pointer
2260  *
2261  * Returns:
2262  * 	0		success
2263  * 	-1		can not allocate gart tabl
2264  */
2265 static int
2266 alloc_gart_table(agpgart_softstate_t *st)
2267 {
2268 	int			num_pages;
2269 	size_t			table_size;
2270 	int			ret = DDI_SUCCESS;
2271 	ddi_dma_cookie_t	cookie;
2272 	uint32_t		num_cookies;
2273 
2274 	num_pages = AGP_MB2PAGES(st->asoft_info.agpki_apersize);
2275 
2276 	/*
2277 	 * Only 40-bit maximum physical memory is supported by today's
2278 	 * AGP hardware (32-bit gart tables can hold 40-bit memory addresses).
2279 	 * No one supports 64-bit gart entries now, so the size of gart
2280 	 * entries defaults to 32-bit though AGP3.0 specifies the possibility
2281 	 * of 64-bit gart entries.
2282 	 */
2283 
2284 	table_size = num_pages * (sizeof (uint32_t));
2285 
2286 	/*
2287 	 * Only AMD64 can put gart table above 4G, 40 bits at maximum
2288 	 */
2289 	if (st->asoft_devreg.agprd_arctype == ARC_AMD64AGP)
2290 		garttable_dma_attr.dma_attr_addr_hi = 0xffffffffffLL;
2291 	else
2292 		garttable_dma_attr.dma_attr_addr_hi = 0xffffffffU;
2293 	/* Allocate physical continuous page frame for gart table */
2294 	if (ret = ddi_dma_alloc_handle(st->asoft_dip,
2295 	    &garttable_dma_attr,
2296 	    DDI_DMA_SLEEP,
2297 	    NULL, &st->gart_dma_handle)) {
2298 		AGPDB_PRINT2((CE_WARN,
2299 		    "alloc_gart_table: ddi_dma_alloc_handle failed"));
2300 		goto err3;
2301 	}
2302 
2303 	if (ret = ddi_dma_mem_alloc(st->gart_dma_handle,
2304 	    table_size,
2305 	    &gart_dev_acc_attr,
2306 	    DDI_DMA_CONSISTENT,
2307 	    DDI_DMA_SLEEP, NULL,
2308 	    &st->gart_vbase,
2309 	    &st->gart_size,
2310 	    &st->gart_dma_acc_handle)) {
2311 		AGPDB_PRINT2((CE_WARN,
2312 		    "alloc_gart_table: ddi_dma_mem_alloc failed"));
2313 		goto err2;
2314 
2315 	}
2316 
2317 	ret = ddi_dma_addr_bind_handle(st->gart_dma_handle,
2318 	    NULL, st->gart_vbase,
2319 	    table_size,
2320 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2321 	    DDI_DMA_SLEEP, NULL,
2322 	    &cookie,  &num_cookies);
2323 
2324 	st->gart_pbase = cookie.dmac_address;
2325 
2326 	if ((ret != DDI_DMA_MAPPED) || (num_cookies != 1)) {
2327 		if (num_cookies > 1)
2328 			(void) ddi_dma_unbind_handle(st->gart_dma_handle);
2329 		AGPDB_PRINT2((CE_WARN,
2330 		    "alloc_gart_table: alloc contiguous phys memory failed"));
2331 		goto err1;
2332 	}
2333 
2334 	return (0);
2335 err1:
2336 	ddi_dma_mem_free(&st->gart_dma_acc_handle);
2337 	st->gart_dma_acc_handle = NULL;
2338 err2:
2339 	ddi_dma_free_handle(&st->gart_dma_handle);
2340 	st->gart_dma_handle = NULL;
2341 err3:
2342 	st->gart_pbase = 0;
2343 	st->gart_size = 0;
2344 	st->gart_vbase = 0;
2345 
2346 	return (-1);
2347 }
2348 
2349 /*
2350  * agp_add_to_gart()
2351  *
2352  * Description:
2353  * 	This function fills the gart table entries by a given page frame number
2354  * 	array and set up the agp aperture page to physical memory page
2355  * 	translation.
2356  * Arguments:
2357  * 	type		valid sytem arc types ARC_AMD64AGP, ARC_INTELAGP,
2358  * 			ARC_AMD64AGP
2359  * 	pfnarray	allocated physical page frame number array
2360  * 	pg_offset	agp aperture start page to be bound
2361  * 	entries		the number of pages to be bound
2362  * 	dma_hdl		gart table dma memory handle
2363  * 	tablep		gart table kernel virtual address
2364  * Returns:
2365  * 	-1		failed
2366  * 	0		success
2367  */
2368 static int
2369 agp_add_to_gart(
2370     agp_arc_type_t type,
2371     pfn_t *pfnarray,
2372     uint32_t pg_offset,
2373     uint32_t entries,
2374     ddi_dma_handle_t dma_hdl,
2375     uint32_t *tablep)
2376 {
2377 	int items = 0;
2378 	uint32_t *entryp;
2379 	uint32_t itemv;
2380 
2381 	entryp = tablep + pg_offset;
2382 	while (items < entries) {
2383 		if (pfn2gartentry(type, pfnarray[items], &itemv))
2384 			break;
2385 		*(entryp + items) = itemv;
2386 		items++;
2387 	}
2388 	if (items < entries)
2389 		return (-1);
2390 
2391 	(void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
2392 	    entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);
2393 
2394 	return (0);
2395 }
2396 
2397 /*
2398  * agp_bind_key()
2399  *
2400  * Description:
2401  * 	This function will call low level gart table access functions to
2402  * 	set up gart table translation. Also it will do some sanity
2403  * 	checking on key table entry.
2404  *
2405  * Arguments:
2406  * 	softstate		driver soft state pointer
2407  * 	keyent			key table entity pointer to be bound
2408  * 	pg_offset		aperture start page to be bound
2409  * Returns:
2410  * 	EINVAL			not a valid operation
2411  */
2412 static int
2413 agp_bind_key(agpgart_softstate_t *softstate,
2414     keytable_ent_t  *keyent, uint32_t  pg_offset)
2415 {
2416 	uint64_t pg_end;
2417 	int ret = 0;
2418 
2419 	ASSERT(keyent);
2420 	ASSERT((keyent->kte_key >= 0) && (keyent->kte_key < AGP_MAXKEYS));
2421 	ASSERT(mutex_owned(&softstate->asoft_instmutex));
2422 
2423 	pg_end = pg_offset + keyent->kte_pages;
2424 
2425 	if (pg_end > AGP_MB2PAGES(softstate->asoft_info.agpki_apersize)) {
2426 		AGPDB_PRINT2((CE_WARN,
2427 		    "agp_bind_key: key=0x%x,exceed aper range",
2428 		    keyent->kte_key));
2429 
2430 		return (EINVAL);
2431 	}
2432 
2433 	if (agp_check_off(softstate->asoft_table,
2434 	    pg_offset, keyent->kte_pages)) {
2435 		AGPDB_PRINT2((CE_WARN,
2436 		    "agp_bind_key: pg_offset=0x%x, pages=0x%lx overlaped",
2437 		    pg_offset, keyent->kte_pages));
2438 		return (EINVAL);
2439 	}
2440 
2441 	ASSERT(keyent->kte_pfnarray != NULL);
2442 
2443 	switch (softstate->asoft_devreg.agprd_arctype) {
2444 	case ARC_IGD810:
2445 	case ARC_IGD830:
2446 		ret = lyr_i8xx_add_to_gtt(pg_offset, keyent,
2447 		    &softstate->asoft_devreg);
2448 		if (ret)
2449 			return (EIO);
2450 		break;
2451 	case ARC_INTELAGP:
2452 	case ARC_AMD64AGP:
2453 		ret =  agp_add_to_gart(
2454 		    softstate->asoft_devreg.agprd_arctype,
2455 		    keyent->kte_pfnarray,
2456 		    pg_offset,
2457 		    keyent->kte_pages,
2458 		    softstate->gart_dma_handle,
2459 		    (uint32_t *)softstate->gart_vbase);
2460 		if (ret)
2461 			return (EINVAL);
2462 		/* Flush GTLB table */
2463 		lyr_flush_gart_cache(&softstate->asoft_devreg);
2464 		break;
2465 	default:
2466 		AGPDB_PRINT2((CE_WARN,
2467 		    "agp_bind_key: arc type = 0x%x unsupported",
2468 		    softstate->asoft_devreg.agprd_arctype));
2469 		return (EINVAL);
2470 	}
2471 	return (0);
2472 }
2473 
/*
 * agpgart_attach()
 *
 * Description:
 * 	Driver attach(9E) entry point. Allocates and initializes the
 * 	per-instance soft state (mutex, LDI identifier, kstats, minor
 * 	node, key table) and unwinds in reverse order on failure.
 *
 * Arguments:
 * 	dip		device info pointer for this instance
 * 	cmd		attach command; only DDI_ATTACH is supported
 *
 * Returns:
 * 	DDI_SUCCESS	instance fully initialized
 * 	DDI_FAILURE	unsupported cmd or initialization error
 */
static int
agpgart_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	agpgart_softstate_t *softstate;

	if (cmd != DDI_ATTACH) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: only attach op supported"));
		return (DDI_FAILURE);
	}
	instance = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(agpgart_glob_soft_handle, instance)
	    != DDI_SUCCESS) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: soft state zalloc failed"));
		goto err1;

	}
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	mutex_init(&softstate->asoft_instmutex, NULL, MUTEX_DRIVER, NULL);
	softstate->asoft_dip = dip;
	/*
	 * Allocate LDI identifier for agpgart driver
	 * Agpgart driver is the kernel consumer
	 */
	if (ldi_ident_from_dip(dip, &softstate->asoft_li)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: LDI indentifier allcation failed"));
		goto err2;
	}

	softstate->asoft_devreg.agprd_arctype = ARC_UNKNOWN;
	/* Install agp kstat */
	if (agp_init_kstats(softstate)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_attach: init kstats error"));
		goto err3;
	}
	/*
	 * devfs will create /dev/agpgart
	 * and  /devices/agpgart:agpgart
	 */

	if (ddi_create_minor_node(dip, AGPGART_DEVNODE, S_IFCHR,
	    AGP_INST2MINOR(instance),
	    DDI_NT_AGP_PSEUDO, 0)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: Can not create minor node"));
		goto err4;
	}

	/* One key table slot per allocatable key, zero-filled */
	softstate->asoft_table = kmem_zalloc(
	    AGP_MAXKEYS * (sizeof (keytable_ent_t)),
	    KM_SLEEP);

	return (DDI_SUCCESS);
err4:
	agp_fini_kstats(softstate);
err3:
	ldi_ident_release(softstate->asoft_li);
err2:
	/*
	 * NOTE(review): the instance mutex initialized above is not
	 * mutex_destroy()ed on this error path before the soft state
	 * is freed -- confirm whether that is intentional.
	 */
	ddi_soft_state_free(agpgart_glob_soft_handle, instance);
err1:
	return (DDI_FAILURE);
}
2540 
/*
 * agpgart_detach()
 *
 * Description:
 * 	Driver detach(9E) entry point. Releases any agp memory that
 * 	applications failed to free, then tears down the minor node,
 * 	kstats, LDI identifier, mutex and soft state.
 *
 * Arguments:
 * 	dip		device info pointer for this instance
 * 	cmd		detach command; only DDI_DETACH is supported
 *
 * Returns:
 * 	DDI_SUCCESS	instance fully torn down
 * 	DDI_FAILURE	unsupported cmd or outstanding memory keys
 */
static int
agpgart_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance;
	agpgart_softstate_t *st;

	instance = ddi_get_instance(dip);

	/*
	 * NOTE(review): st is not checked for NULL before the
	 * mutex_enter below -- presumably detach can only follow a
	 * successful attach; confirm against the DDI framework.
	 */
	st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	/*
	 * Caller should free all the memory allocated explicitly.
	 * We release the memory allocated by caller which is not
	 * properly freed. mutex_enter here make sure assertion on
	 * softstate mutex success in agp_dealloc_mem.
	 */
	mutex_enter(&st->asoft_instmutex);
	if (agp_del_allkeys(st)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_detach: agp_del_allkeys err"));
		AGPDB_PRINT2((CE_WARN,
		    "you might free agp memory exported to your applications"));

		mutex_exit(&st->asoft_instmutex);
		return (DDI_FAILURE);
	}
	mutex_exit(&st->asoft_instmutex);
	if (st->asoft_table) {
		kmem_free(st->asoft_table,
		    AGP_MAXKEYS * (sizeof (keytable_ent_t)));
		st->asoft_table = 0;
	}

	/* Reverse of the attach sequence */
	ddi_remove_minor_node(dip, AGPGART_DEVNODE);
	agp_fini_kstats(st);
	ldi_ident_release(st->asoft_li);
	mutex_destroy(&st->asoft_instmutex);
	ddi_soft_state_free(agpgart_glob_soft_handle, instance);

	return (DDI_SUCCESS);
}
2584 
2585 /*ARGSUSED*/
2586 static int
2587 agpgart_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
2588     void **resultp)
2589 {
2590 	agpgart_softstate_t *st;
2591 	int instance, rval = DDI_FAILURE;
2592 	dev_t dev;
2593 
2594 	switch (cmd) {
2595 	case DDI_INFO_DEVT2DEVINFO:
2596 		dev = (dev_t)arg;
2597 		instance = AGP_DEV2INST(dev);
2598 		st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2599 		if (st != NULL) {
2600 			mutex_enter(&st->asoft_instmutex);
2601 			*resultp = st->asoft_dip;
2602 			mutex_exit(&st->asoft_instmutex);
2603 			rval = DDI_SUCCESS;
2604 		} else
2605 			*resultp = NULL;
2606 
2607 		break;
2608 	case DDI_INFO_DEVT2INSTANCE:
2609 		dev = (dev_t)arg;
2610 		instance = AGP_DEV2INST(dev);
2611 		*resultp = (void *)(uintptr_t)instance;
2612 		rval = DDI_SUCCESS;
2613 
2614 		break;
2615 	default:
2616 		break;
2617 	}
2618 
2619 	return (rval);
2620 }
2621 
2622 /*
2623  * agpgart_open()
2624  *
2625  * Description:
2626  * 	This function is the driver open entry point. If it is the
2627  * 	first time the agpgart driver is opened, the driver will
2628  * 	open other agp related layered drivers and set up the agpgart
2629  * 	table properly.
2630  *
2631  * Arguments:
2632  * 	dev			device number pointer
2633  * 	openflags		open flags
2634  *	otyp			OTYP_BLK, OTYP_CHR
2635  * 	credp			user's credential's struct pointer
2636  *
2637  * Returns:
2638  * 	ENXIO			operation error
2639  * 	EAGAIN			resoure temporarily unvailable
2640  * 	0			success
2641  */
2642 /*ARGSUSED*/
2643 static int
2644 agpgart_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
2645 {
2646 	int instance = AGP_DEV2INST(*dev);
2647 	agpgart_softstate_t *softstate;
2648 	int rc = 0;
2649 
2650 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2651 	if (softstate == NULL) {
2652 		AGPDB_PRINT2((CE_WARN, "agpgart_open: get soft state err"));
2653 		return (ENXIO);
2654 	}
2655 	mutex_enter(&softstate->asoft_instmutex);
2656 
2657 	if (softstate->asoft_opened) {
2658 		softstate->asoft_opened++;
2659 		mutex_exit(&softstate->asoft_instmutex);
2660 		return (0);
2661 	}
2662 
2663 	/*
2664 	 * The driver is opened first time, so we initialize layered
2665 	 * driver interface and softstate member here.
2666 	 */
2667 	softstate->asoft_pgused = 0;
2668 	if (lyr_init(&softstate->asoft_devreg, softstate->asoft_li)) {
2669 		AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_init failed"));
2670 		mutex_exit(&softstate->asoft_instmutex);
2671 		return (EAGAIN);
2672 	}
2673 
2674 	/* Call into layered driver */
2675 	if (lyr_get_info(&softstate->asoft_info, &softstate->asoft_devreg)) {
2676 		AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_get_info error"));
2677 		lyr_end(&softstate->asoft_devreg);
2678 		mutex_exit(&softstate->asoft_instmutex);
2679 		return (EIO);
2680 	}
2681 
2682 	/*
2683 	 * BIOS already set up gtt table for ARC_IGD830
2684 	 */
2685 	if (IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
2686 		softstate->asoft_opened++;
2687 
2688 		softstate->asoft_pgtotal =
2689 		    get_max_pages(softstate->asoft_info.agpki_apersize);
2690 
2691 		if (lyr_config_devices(&softstate->asoft_devreg)) {
2692 			AGPDB_PRINT2((CE_WARN,
2693 			    "agpgart_open: lyr_config_devices error"));
2694 			lyr_end(&softstate->asoft_devreg);
2695 			mutex_exit(&softstate->asoft_instmutex);
2696 
2697 			return (EIO);
2698 		}
2699 		mutex_exit(&softstate->asoft_instmutex);
2700 		return (0);
2701 	}
2702 
2703 	rc = alloc_gart_table(softstate);
2704 
2705 	/*
2706 	 * Allocate physically contiguous pages for AGP arc or
2707 	 * i810 arc. If failed, divide aper_size by 2 to
2708 	 * reduce gart table size until 4 megabytes. This
2709 	 * is just a workaround for systems with very few
2710 	 * physically contiguous memory.
2711 	 */
2712 	if (rc) {
2713 		while ((softstate->asoft_info.agpki_apersize >= 4) &&
2714 		    (alloc_gart_table(softstate))) {
2715 			softstate->asoft_info.agpki_apersize >>= 1;
2716 		}
2717 		if (softstate->asoft_info.agpki_apersize >= 4)
2718 			rc = 0;
2719 	}
2720 
2721 	if (rc != 0) {
2722 		AGPDB_PRINT2((CE_WARN,
2723 		    "agpgart_open: alloc gart table failed"));
2724 		lyr_end(&softstate->asoft_devreg);
2725 		mutex_exit(&softstate->asoft_instmutex);
2726 		return (EAGAIN);
2727 	}
2728 
2729 	softstate->asoft_pgtotal =
2730 	    get_max_pages(softstate->asoft_info.agpki_apersize);
2731 	/*
2732 	 * BIOS doesn't initialize GTT for i810,
2733 	 * So i810 GTT must be created by driver.
2734 	 *
2735 	 * Set up gart table and enable it.
2736 	 */
2737 	if (lyr_set_gart_addr(softstate->gart_pbase,
2738 	    &softstate->asoft_devreg)) {
2739 		AGPDB_PRINT2((CE_WARN,
2740 		    "agpgart_open: set gart table addr failed"));
2741 		free_gart_table(softstate);
2742 		lyr_end(&softstate->asoft_devreg);
2743 		mutex_exit(&softstate->asoft_instmutex);
2744 		return (EIO);
2745 	}
2746 	if (lyr_config_devices(&softstate->asoft_devreg)) {
2747 		AGPDB_PRINT2((CE_WARN,
2748 		    "agpgart_open: lyr_config_devices failed"));
2749 		free_gart_table(softstate);
2750 		lyr_end(&softstate->asoft_devreg);
2751 		mutex_exit(&softstate->asoft_instmutex);
2752 		return (EIO);
2753 	}
2754 
2755 	softstate->asoft_opened++;
2756 	mutex_exit(&softstate->asoft_instmutex);
2757 
2758 	return (0);
2759 }
2760 
2761 /*
2762  * agpgart_close()
2763  *
2764  * Description:
2765  * 	agpgart_close will release resources allocated in the first open
2766  * 	and close other open layered drivers. Also it frees the memory
2767  *	allocated by ioctls.
2768  *
2769  * Arguments:
2770  * 	dev			device number
2771  * 	flag			file status flag
2772  *	otyp			OTYP_BLK, OTYP_CHR
2773  * 	credp			user's credential's struct pointer
2774  *
2775  * Returns:
2776  * 	ENXIO			not an error, to support "deferred attach"
2777  * 	0			success
2778  */
2779 /*ARGSUSED*/
2780 static int
2781 agpgart_close(dev_t dev, int flag, int otyp, cred_t *credp)
2782 {
2783 	int instance = AGP_DEV2INST(dev);
2784 	agpgart_softstate_t *softstate;
2785 
2786 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2787 	if (softstate == NULL) {
2788 		AGPDB_PRINT2((CE_WARN, "agpgart_close: get soft state err"));
2789 		return (ENXIO);
2790 	}
2791 
2792 	mutex_enter(&softstate->asoft_instmutex);
2793 	ASSERT(softstate->asoft_opened);
2794 
2795 
2796 	/*
2797 	 * If the last process close this device is not the controlling
2798 	 * process, also release the control over agpgart driver here if the
2799 	 * the controlling process fails to release the control before it
2800 	 * close the driver.
2801 	 */
2802 	if (softstate->asoft_acquired == 1) {
2803 		AGPDB_PRINT2((CE_WARN,
2804 		    "agpgart_close: auto release control over driver"));
2805 		release_control(softstate);
2806 	}
2807 
2808 	if (lyr_unconfig_devices(&softstate->asoft_devreg)) {
2809 		AGPDB_PRINT2((CE_WARN,
2810 		    "agpgart_close: lyr_unconfig_device error"));
2811 		mutex_exit(&softstate->asoft_instmutex);
2812 		return (EIO);
2813 	}
2814 	softstate->asoft_agpen = 0;
2815 
2816 	if (!IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
2817 		free_gart_table(softstate);
2818 	}
2819 
2820 	lyr_end(&softstate->asoft_devreg);
2821 
2822 	/*
2823 	 * This statement must be positioned before agp_del_allkeys
2824 	 * agp_dealloc_mem indirectly called by agp_del_allkeys
2825 	 * will test this variable.
2826 	 */
2827 	softstate->asoft_opened = 0;
2828 
2829 	/*
2830 	 * Free the memory allocated by user applications which
2831 	 * was never deallocated.
2832 	 */
2833 	(void) agp_del_allkeys(softstate);
2834 
2835 	mutex_exit(&softstate->asoft_instmutex);
2836 
2837 	return (0);
2838 }
2839 
2840 static int
2841 ioctl_agpgart_info(agpgart_softstate_t  *softstate, void  *arg, int flags)
2842 {
2843 	agp_info_t infostruct;
2844 #ifdef _MULTI_DATAMODEL
2845 	agp_info32_t infostruct32;
2846 #endif
2847 
2848 	bzero(&infostruct, sizeof (agp_info_t));
2849 
2850 #ifdef _MULTI_DATAMODEL
2851 	bzero(&infostruct32, sizeof (agp_info32_t));
2852 	if (ddi_model_convert_from(flags & FMODELS) == DDI_MODEL_ILP32) {
2853 		if (copyinfo(softstate, &infostruct))
2854 			return (EINVAL);
2855 
2856 		agpinfo_default_to_32(infostruct, infostruct32);
2857 		if (ddi_copyout(&infostruct32, arg,
2858 		    sizeof (agp_info32_t), flags) != 0)
2859 			return (EFAULT);
2860 
2861 		return (0);
2862 	}
2863 #endif /* _MULTI_DATAMODEL */
2864 	if (copyinfo(softstate, &infostruct))
2865 		return (EINVAL);
2866 
2867 	if (ddi_copyout(&infostruct, arg, sizeof (agp_info_t), flags) != 0) {
2868 		return (EFAULT);
2869 	}
2870 
2871 	return (0);
2872 }
2873 
2874 static int
2875 ioctl_agpgart_acquire(agpgart_softstate_t  *st)
2876 {
2877 	if (st->asoft_acquired) {
2878 		AGPDB_PRINT2((CE_WARN, "ioctl_acquire: already acquired"));
2879 		return (EBUSY);
2880 	}
2881 	acquire_control(st);
2882 	return (0);
2883 }
2884 
2885 static int
2886 ioctl_agpgart_release(agpgart_softstate_t  *st)
2887 {
2888 	if (is_controlling_proc(st) < 0) {
2889 		AGPDB_PRINT2((CE_WARN,
2890 		    "ioctl_agpgart_release: not a controlling process"));
2891 		return (EPERM);
2892 	}
2893 	release_control(st);
2894 	return (0);
2895 }
2896 
2897 static int
2898 ioctl_agpgart_setup(agpgart_softstate_t  *st, void  *arg, int flags)
2899 {
2900 	agp_setup_t data;
2901 	int rc = 0;
2902 
2903 	if (is_controlling_proc(st) < 0) {
2904 		AGPDB_PRINT2((CE_WARN,
2905 		    "ioctl_agpgart_setup: not a controlling process"));
2906 		return (EPERM);
2907 	}
2908 
2909 	if (!IS_TRUE_AGP(st->asoft_devreg.agprd_arctype)) {
2910 		AGPDB_PRINT2((CE_WARN,
2911 		    "ioctl_agpgart_setup: no true agp bridge"));
2912 		return (EINVAL);
2913 	}
2914 
2915 	if (ddi_copyin(arg, &data, sizeof (agp_setup_t), flags) != 0)
2916 		return (EFAULT);
2917 
2918 	if (rc = agp_setup(st, data.agps_mode))
2919 		return (rc);
2920 	/* Store agp mode status for kstat */
2921 	st->asoft_agpen = 1;
2922 	return (0);
2923 }
2924 
2925 static int
2926 ioctl_agpgart_alloc(agpgart_softstate_t  *st, void  *arg, int flags)
2927 {
2928 	agp_allocate_t	alloc_info;
2929 	keytable_ent_t	*entryp;
2930 	size_t		length;
2931 	uint64_t	pg_num;
2932 
2933 	if (is_controlling_proc(st) < 0) {
2934 		AGPDB_PRINT2((CE_WARN,
2935 		    "ioctl_agpgart_alloc: not a controlling process"));
2936 		return (EPERM);
2937 	}
2938 
2939 	if (ddi_copyin(arg, &alloc_info,
2940 	    sizeof (agp_allocate_t), flags) != 0) {
2941 		return (EFAULT);
2942 	}
2943 	pg_num = st->asoft_pgused + alloc_info.agpa_pgcount;
2944 	if (pg_num > st->asoft_pgtotal) {
2945 		AGPDB_PRINT2((CE_WARN,
2946 		    "ioctl_agpgart_alloc: exceeding the memory pages limit"));
2947 		AGPDB_PRINT2((CE_WARN,
2948 		    "ioctl_agpgart_alloc: request %x pages failed",
2949 		    alloc_info.agpa_pgcount));
2950 		AGPDB_PRINT2((CE_WARN,
2951 		    "ioctl_agpgart_alloc: pages used %x total is %x",
2952 		    st->asoft_pgused, st->asoft_pgtotal));
2953 
2954 		return (EINVAL);
2955 	}
2956 
2957 	length = AGP_PAGES2BYTES(alloc_info.agpa_pgcount);
2958 	entryp = agp_alloc_mem(st, length, alloc_info.agpa_type);
2959 	if (!entryp) {
2960 		AGPDB_PRINT2((CE_WARN,
2961 		    "ioctl_agpgart_alloc: allocate 0x%lx bytes failed",
2962 		    length));
2963 		return (ENOMEM);
2964 	}
2965 	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
2966 	alloc_info.agpa_key = entryp->kte_key;
2967 	if (alloc_info.agpa_type == AGP_PHYSICAL) {
2968 		alloc_info.agpa_physical =
2969 		    (uint32_t)(entryp->kte_pfnarray[0] << AGP_PAGE_SHIFT);
2970 	}
2971 	/* Update the memory pagse used */
2972 	st->asoft_pgused += alloc_info.agpa_pgcount;
2973 
2974 	if (ddi_copyout(&alloc_info, arg,
2975 	    sizeof (agp_allocate_t), flags) != 0) {
2976 
2977 		return (EFAULT);
2978 	}
2979 
2980 	return (0);
2981 }
2982 
2983 static int
2984 ioctl_agpgart_dealloc(agpgart_softstate_t  *st, intptr_t arg)
2985 {
2986 	int key;
2987 	keytable_ent_t  *keyent;
2988 
2989 	if (is_controlling_proc(st) < 0) {
2990 		AGPDB_PRINT2((CE_WARN,
2991 		    "ioctl_agpgart_dealloc: not a controlling process"));
2992 		return (EPERM);
2993 	}
2994 	key = (int)arg;
2995 	if ((key >= AGP_MAXKEYS) || key < 0) {
2996 		return (EINVAL);
2997 	}
2998 	keyent = &st->asoft_table[key];
2999 	if (!keyent->kte_memhdl) {
3000 		return (EINVAL);
3001 	}
3002 
3003 	if (agp_dealloc_mem(st, keyent))
3004 		return (EINVAL);
3005 
3006 	/* Update the memory pages used */
3007 	st->asoft_pgused -= keyent->kte_pages;
3008 	bzero(keyent, sizeof (keytable_ent_t));
3009 
3010 	return (0);
3011 }
3012 
3013 static int
3014 ioctl_agpgart_bind(agpgart_softstate_t  *st, void  *arg, int flags)
3015 {
3016 	agp_bind_t 	bind_info;
3017 	keytable_ent_t	*keyent;
3018 	int		key;
3019 	uint32_t	pg_offset;
3020 	int		retval = 0;
3021 
3022 	if (is_controlling_proc(st) < 0) {
3023 		AGPDB_PRINT2((CE_WARN,
3024 		    "ioctl_agpgart_bind: not a controlling process"));
3025 		return (EPERM);
3026 	}
3027 
3028 	if (ddi_copyin(arg, &bind_info, sizeof (agp_bind_t), flags) != 0) {
3029 		return (EFAULT);
3030 	}
3031 
3032 	key = bind_info.agpb_key;
3033 	if ((key >= AGP_MAXKEYS) || key < 0) {
3034 		AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_bind: invalid key"));
3035 		return (EINVAL);
3036 	}
3037 
3038 	if (IS_INTEL_830(st->asoft_devreg.agprd_arctype)) {
3039 		if (AGP_PAGES2KB(bind_info.agpb_pgstart) <
3040 		    st->asoft_info.agpki_presize) {
3041 			AGPDB_PRINT2((CE_WARN,
3042 			    "ioctl_agpgart_bind: bind to prealloc area "
3043 			    "pgstart = %dKB < presize = %ldKB",
3044 			    AGP_PAGES2KB(bind_info.agpb_pgstart),
3045 			    st->asoft_info.agpki_presize));
3046 			return (EINVAL);
3047 		}
3048 	}
3049 
3050 	pg_offset = bind_info.agpb_pgstart;
3051 	keyent = &st->asoft_table[key];
3052 	if (!keyent->kte_memhdl) {
3053 		AGPDB_PRINT2((CE_WARN,
3054 		    "ioctl_agpgart_bind: Key = 0x%x can't get keyenty",
3055 		    key));
3056 		return (EINVAL);
3057 	}
3058 
3059 	if (keyent->kte_bound != 0) {
3060 		AGPDB_PRINT2((CE_WARN,
3061 		    "ioctl_agpgart_bind: Key = 0x%x already bound",
3062 		    key));
3063 		return (EINVAL);
3064 	}
3065 	retval = agp_bind_key(st, keyent, pg_offset);
3066 
3067 	if (retval == 0) {
3068 		keyent->kte_pgoff = pg_offset;
3069 		keyent->kte_bound = 1;
3070 	}
3071 
3072 	return (retval);
3073 }
3074 
3075 static int
3076 ioctl_agpgart_unbind(agpgart_softstate_t  *st, void  *arg, int flags)
3077 {
3078 	int key, retval = 0;
3079 	agp_unbind_t unbindinfo;
3080 	keytable_ent_t *keyent;
3081 
3082 	if (is_controlling_proc(st) < 0) {
3083 		AGPDB_PRINT2((CE_WARN,
3084 		    "ioctl_agpgart_bind: not a controlling process"));
3085 		return (EPERM);
3086 	}
3087 
3088 	if (ddi_copyin(arg, &unbindinfo, sizeof (unbindinfo), flags) != 0) {
3089 		return (EFAULT);
3090 	}
3091 	key = unbindinfo.agpu_key;
3092 	if ((key >= AGP_MAXKEYS) || key < 0) {
3093 		AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_unbind: invalid key"));
3094 		return (EINVAL);
3095 	}
3096 	keyent = &st->asoft_table[key];
3097 	if (!keyent->kte_bound) {
3098 		return (EINVAL);
3099 	}
3100 
3101 	if ((retval = agp_unbind_key(st, keyent)) != 0)
3102 		return (retval);
3103 
3104 	return (0);
3105 }
3106 
3107 /*ARGSUSED*/
3108 static int
3109 agpgart_ioctl(dev_t dev, int cmd, intptr_t intarg, int flags,
3110     cred_t *credp, int *rvalp)
3111 {
3112 	int instance;
3113 	int retval = 0;
3114 	void *arg = (void*)intarg;
3115 
3116 	agpgart_softstate_t *softstate;
3117 
3118 	instance = AGP_DEV2INST(dev);
3119 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
3120 	if (softstate == NULL) {
3121 		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: get soft state err"));
3122 		return (ENXIO);
3123 	}
3124 
3125 	if ((cmd != AGPIOC_INFO) && secpolicy_gart_access(credp)) {
3126 		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: permission denied"));
3127 		return (EPERM);
3128 	}
3129 
3130 	mutex_enter(&softstate->asoft_instmutex);
3131 
3132 	switch (cmd) {
3133 	case AGPIOC_INFO:
3134 		retval = ioctl_agpgart_info(softstate, arg, flags);
3135 		break;
3136 	case AGPIOC_ACQUIRE:
3137 		retval = ioctl_agpgart_acquire(softstate);
3138 		break;
3139 	case AGPIOC_RELEASE:
3140 		retval = ioctl_agpgart_release(softstate);
3141 		break;
3142 	case AGPIOC_SETUP:
3143 		retval = ioctl_agpgart_setup(softstate, arg, flags);
3144 		break;
3145 	case AGPIOC_ALLOCATE:
3146 		retval = ioctl_agpgart_alloc(softstate, arg, flags);
3147 		break;
3148 	case AGPIOC_DEALLOCATE:
3149 		retval = ioctl_agpgart_dealloc(softstate, intarg);
3150 		break;
3151 	case AGPIOC_BIND:
3152 		retval = ioctl_agpgart_bind(softstate, arg, flags);
3153 		break;
3154 	case AGPIOC_UNBIND:
3155 		retval = ioctl_agpgart_unbind(softstate, arg, flags);
3156 		break;
3157 	default:
3158 		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: wrong argument"));
3159 		retval = ENXIO;
3160 		break;
3161 	}
3162 
3163 	mutex_exit(&softstate->asoft_instmutex);
3164 	return (retval);
3165 }
3166 
3167 static int
3168 agpgart_segmap(dev_t dev, off_t off, struct as *asp,
3169     caddr_t *addrp, off_t len, unsigned int prot,
3170     unsigned int maxprot, unsigned int flags, cred_t *credp)
3171 {
3172 
3173 	struct agpgart_softstate *softstate;
3174 	int instance;
3175 	int rc = 0;
3176 
3177 	instance = AGP_DEV2INST(dev);
3178 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
3179 	if (softstate == NULL) {
3180 		AGPDB_PRINT2((CE_WARN, "agpgart_segmap: get soft state err"));
3181 		return (ENXIO);
3182 	}
3183 	if (!AGP_ALIGNED(len))
3184 		return (EINVAL);
3185 
3186 	mutex_enter(&softstate->asoft_instmutex);
3187 
3188 	/*
3189 	 * Process must have gart map privilege or gart access privilege
3190 	 * to map agp memory.
3191 	 */
3192 	if (secpolicy_gart_map(credp)) {
3193 		mutex_exit(&softstate->asoft_instmutex);
3194 		AGPDB_PRINT2((CE_WARN, "agpgart_segmap: permission denied"));
3195 		return (EPERM);
3196 	}
3197 
3198 	rc = devmap_setup(dev, (offset_t)off, asp, addrp,
3199 	    (size_t)len, prot, maxprot, flags, credp);
3200 
3201 	mutex_exit(&softstate->asoft_instmutex);
3202 	return (rc);
3203 }
3204 
/*
 * agpgart_devmap()
 *
 * Description:
 * 	devmap(9E) entry point: map a slice of a bound agp memory chunk
 * 	into the caller's address space. The aperture offset is resolved
 * 	to its owning key table entry and the requested length is clamped
 * 	to that entry's remaining size.
 *
 * Arguments:
 * 	dev		device number
 * 	cookie		devmap handle
 * 	offset		byte offset into the agp aperture
 * 	len		requested mapping length
 * 	mappedlen	actual mapped length returned to the framework
 * 	model		data model (unused)
 *
 * Returns:
 * 	ENXIO		no soft state for this instance
 * 	EINVAL		offset out of range, no bound entry there,
 * 			unsupported memory type, or devmap setup failure
 * 	0		success
 */
/*ARGSUSED*/
static int
agpgart_devmap(dev_t dev, devmap_cookie_t cookie, offset_t offset, size_t len,
    size_t *mappedlen, uint_t model)
{
	struct agpgart_softstate *softstate;
	int instance, status;
	struct keytable_ent *mementry;
	offset_t local_offset;

	instance = AGP_DEV2INST(dev);
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agpgart_devmap: get soft state err"));
		return (ENXIO);
	}


	if (offset > MB2BYTES(softstate->asoft_info.agpki_apersize)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_devmap: offset is too large"));
		return (EINVAL);
	}

	/*
	 * Can not find any memory now, so fail.
	 */

	mementry = agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));

	if (mementry == NULL) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: can not find the proper keyent"));
		return (EINVAL);
	}

	/* Offset of the request within the chunk that owns it */
	local_offset = offset - AGP_PAGES2BYTES(mementry->kte_pgoff);

	/* Clamp the mapping so it does not run past the chunk's end */
	if (len > (AGP_PAGES2BYTES(mementry->kte_pages) - local_offset)) {
		len = AGP_PAGES2BYTES(mementry->kte_pages) - local_offset;
	}

	switch (mementry->kte_type) {
	case AGP_NORMAL:
		/*
		 * Only AGP_NORMAL (pmem-backed) memory is mappable here;
		 * presumably AGP_PHYSICAL chunks are mapped via the xsvc
		 * driver instead -- confirm against agp_alloc_mem().
		 */
		status = devmap_pmem_setup(cookie, softstate->asoft_dip,
		    &agp_devmap_cb,
		    PMEMP(mementry->kte_memhdl)->pmem_cookie, local_offset,
		    len, PROT_ALL, (DEVMAP_DEFAULTS|IOMEM_DATA_UC_WR_COMBINE),
		    &mem_dev_acc_attr);
		break;
	default:
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: not a valid memory type"));
		return (EINVAL);
	}


	if (status == 0) {
		*mappedlen = len;
	} else {
		*mappedlen = 0;
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: devmap interface failed"));
		return (EINVAL);
	}

	return (0);
}
3272 
/*
 * Character device entry points.  Userland talks to agpgart only through
 * open/close, ioctl and the devmap/segmap mapping path; there is no
 * read/write interface (those slots are nodev).  D_DEVMAP advertises the
 * devmap(9E) support; D_MP marks the driver MT-safe.
 */
static struct cb_ops	agpgart_cb_ops = {
	agpgart_open,		/* open() */
	agpgart_close,		/* close() */
	nodev,			/* strategy() */
	nodev,			/* print routine */
	nodev,			/* no dump routine */
	nodev,			/* read() */
	nodev,			/* write() */
	agpgart_ioctl,		/* ioctl() */
	agpgart_devmap,		/* devmap routine */
	nodev,			/* no longer use mmap routine */
	agpgart_segmap,		/* system segmap routine */
	nochpoll,		/* no chpoll routine */
	ddi_prop_op,		/* system prop operations */
	0,			/* not a STREAMS driver, no cb_str */
	D_DEVMAP | D_MP,	/* safe for multi-thread/multi-processor */
	CB_REV,			/* cb_ops structure revision */
	nodev,			/* cb_aread() */
	nodev,			/* cb_awrite() */
};
3293 
/*
 * Device operations vector: ties the autoconfiguration entry points
 * (attach/detach/getinfo) and the cb_ops table above into the DDI
 * framework.  No bus_ops — this is a leaf driver.
 */
static struct dev_ops agpgart_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	agpgart_getinfo,	/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	agpgart_attach,		/* devo_attach */
	agpgart_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&agpgart_cb_ops,	/* devo_cb_ops */
	(struct bus_ops *)0,	/* devo_bus_ops */
	NULL,			/* devo_power */
};
3307 
/*
 * Loadable-module linkage: identifies this module to the kernel as a
 * device driver ("AGP driver v1.9") implemented by agpgart_ops.
 */
static	struct modldrv modldrv = {
	&mod_driverops,
	"AGP driver v1.9",
	&agpgart_ops,
};
3313 
/*
 * Module linkage: a single driver linkage element, NULL-terminated,
 * handed to mod_install()/mod_remove() in _init()/_fini().
 */
static struct modlinkage modlinkage = {
	MODREV_1,		/* MODREV_1 is indicated by manual */
	{&modldrv, NULL, NULL, NULL}
};
3318 
/*
 * Note: agpgart_glob_soft_handle is defined once near the top of this
 * file.  A redundant duplicate tentative definition that stood here
 * (harmless to the compiler, but confusing to maintainers) was removed.
 */
3320 
3321 int
3322 _init(void)
3323 {
3324 	int ret = DDI_SUCCESS;
3325 
3326 	ret = ddi_soft_state_init(&agpgart_glob_soft_handle,
3327 	    sizeof (agpgart_softstate_t),
3328 	    AGPGART_MAX_INSTANCES);
3329 
3330 	if (ret != 0) {
3331 		AGPDB_PRINT2((CE_WARN,
3332 		    "_init: soft state init error code=0x%x", ret));
3333 		return (ret);
3334 	}
3335 
3336 	if ((ret = mod_install(&modlinkage)) != 0) {
3337 		AGPDB_PRINT2((CE_WARN,
3338 		    "_init: mod install error code=0x%x", ret));
3339 		ddi_soft_state_fini(&agpgart_glob_soft_handle);
3340 		return (ret);
3341 	}
3342 
3343 	return (DDI_SUCCESS);
3344 }
3345 
3346 int
3347 _info(struct modinfo *modinfop)
3348 {
3349 	return (mod_info(&modlinkage, modinfop));
3350 }
3351 
3352 int
3353 _fini(void)
3354 {
3355 	int ret;
3356 
3357 	if ((ret = mod_remove(&modlinkage)) == 0) {
3358 		ddi_soft_state_fini(&agpgart_glob_soft_handle);
3359 	}
3360 
3361 	return (ret);
3362 }
3363