/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_agp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/agpio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>

#include <dev/agp/agppriv.h>
#include <dev/agp/agpvar.h>
#include <dev/agp/agpreg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

MODULE_VERSION(agp, 1);

MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");

/* agp_drv.c */
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

static struct cdevsw agp_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_NEEDGIANT,
	.d_open = agp_open,
	.d_close = agp_close,
	.d_ioctl = agp_ioctl,
	.d_mmap = agp_mmap,
	.d_name = "agp",
};

static devclass_t agp_devclass;

/* Helper functions for implementing chipset mini drivers. */

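/*
 * Return the offset of the AGP capability block in the device's PCI
 * configuration space, or 0 if the device has no AGP capability.
 */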
u_int8_t
agp_find_caps(device_t dev)
{
	int capreg;

	if (pci_find_cap(dev, PCIY_AGP, &capreg) != 0)
		capreg = 0;
	return (capreg);
}

/*
 * Find an AGP display device (if any).
 */
static device_t
agp_find_display(void)
{
	devclass_t pci = devclass_find("pci");
	device_t bus, dev = 0;
	device_t *kids;
	int busnum, numkids, i;

	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		if (device_get_children(bus, &kids, &numkids) != 0)
			continue;
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_DISPLAY
			    && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
				if (agp_find_caps(dev)) {
					free(kids, M_TEMP);
					return dev;
				}
		}
		free(kids, M_TEMP);
	}

	return 0;
}

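/*
 * Allocate a GATT (Graphics Address Translation Table) sized for the
 * current aperture: one 32-bit entry per AGP page, in physically
 * contiguous, write-combined memory that the chipset can walk.
 */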
struct agp_gatt *
agp_alloc_gatt(device_t dev)
{
	u_int32_t apsize = AGP_GET_APERTURE(dev);
	u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
	struct agp_gatt *gatt;

	if (bootverbose)
		device_printf(dev,
		    "allocating GATT for aperture of size %dM\n",
		    apsize / (1024*1024));

	if (entries == 0) {
		device_printf(dev, "bad aperture size\n");
		return NULL;
	}

	gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
	if (!gatt)
		return NULL;

	gatt->ag_entries = entries;
	gatt->ag_virtual = kmem_alloc_contig(entries * sizeof(uint32_t),
	    M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING);
	if (!gatt->ag_virtual) {
		if (bootverbose)
			device_printf(dev, "contiguous allocation failed\n");
		free(gatt, M_AGP);
		return NULL;
	}
	gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);

	return gatt;
}

void
agp_free_gatt(struct agp_gatt *gatt)
{
	kmem_free(gatt->ag_virtual, gatt->ag_entries * sizeof(uint32_t));
	free(gatt, M_AGP);
}

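/*
 * Heuristic limits for AGP memory allocation, taken from the Linux
 * driver: each row maps total system memory (MB) to the maximum
 * amount of AGP memory (MB) we allow to be allocated.
 */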
static u_int agp_max[][2] = {
	{0, 0},
	{32, 4},
	{64, 28},
	{128, 96},
	{256, 204},
	{512, 440},
	{1024, 942},
	{2048, 1920},
	{4096, 3932}
};
#define AGP_MAX_SIZE nitems(agp_max)

/**
 * Sets the PCI resource which represents the AGP aperture.
 *
 * If not called, the default AGP aperture resource of AGP_APBASE will
 * be used. Must be called before agp_generic_attach().
 */
void
agp_set_aperture_resource(device_t dev, int rid)
{
	struct agp_softc *sc = device_get_softc(dev);

	sc->as_aperture_rid = rid;
}

int
agp_generic_attach(device_t dev)
{
	struct make_dev_args mdargs;
	struct agp_softc *sc = device_get_softc(dev);
	int error, i, unit;
	u_int memsize;

	/*
	 * Find and map the aperture, RF_SHAREABLE for DRM but not RF_ACTIVE
	 * because the kernel doesn't need to map it.
	 */

	if (sc->as_aperture_rid != -1) {
		if (sc->as_aperture_rid == 0)
			sc->as_aperture_rid = AGP_APBASE;

		sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->as_aperture_rid, RF_SHAREABLE);
		if (!sc->as_aperture)
			return ENOMEM;
	}

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(realmem) >> 20;
	for (i = 0; i < AGP_MAX_SIZE; i++) {
		if (memsize <= agp_max[i][0])
			break;
	}
	if (i == AGP_MAX_SIZE)
		i = AGP_MAX_SIZE - 1;
	sc->as_maxmem = agp_max[i][1] << 20U;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	mtx_init(&sc->as_lock, "agp lock", NULL, MTX_DEF);

	/*
	 * Initialise stuff for the userland device.
	 */
	agp_devclass = devclass_find("agp");
	TAILQ_INIT(&sc->as_memory);
	sc->as_nextid = 1;

	sc->as_devalias = NULL;

	make_dev_args_init(&mdargs);
	mdargs.mda_devsw = &agp_cdevsw;
	mdargs.mda_uid = UID_ROOT;
	mdargs.mda_gid = GID_WHEEL;
	mdargs.mda_mode = 0600;
	mdargs.mda_si_drv1 = dev;
	mdargs.mda_si_drv2 = NULL;

	unit = device_get_unit(dev);
	error = make_dev_s(&mdargs, &sc->as_devnode, "agpgart%d", unit);
	if (error == 0) {
		/*
		 * Create an alias for the first device that shows up.
		 */
		if (unit == 0) {
			(void)make_dev_alias_p(MAKEDEV_CHECKNAME,
			    &sc->as_devalias, sc->as_devnode, "agpgart");
		}
	} else {
		agp_free_res(dev);
	}

	return error;
}

void
agp_free_cdev(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	destroy_dev(sc->as_devnode);
	if (sc->as_devalias != NULL)
		destroy_dev(sc->as_devalias);
}

void
agp_free_res(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_aperture != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->as_aperture_rid,
		    sc->as_aperture);
	mtx_destroy(&sc->as_lock);
}

int
agp_generic_detach(device_t dev)
{

	agp_free_cdev(dev);
	agp_free_res(dev);
	return 0;
}

/**
 * Default AGP aperture size detection which simply returns the size of
 * the aperture's PCI resource.
 */
u_int32_t
agp_generic_get_aperture(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	return rman_get_size(sc->as_aperture);
}

/**
 * Default AGP aperture size setting function, which simply doesn't allow
 * changes to resource size.
 */
int
agp_generic_set_aperture(device_t dev, u_int32_t aperture)
{
	u_int32_t current_aperture;

	current_aperture = AGP_GET_APERTURE(dev);
	if (current_aperture != aperture)
		return EINVAL;
	else
		return 0;
}

/*
 * This does the enable logic for v3, with the same topology
 * restrictions as in place for v2 -- one bus, one device on the bus.
 */
static int
agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate, arqsz, cal;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/*
	 * ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values.
	 */
	arqsz = AGP_MODE_GET_ARQSZ(mode);
	if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(tstatus);
	if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(mstatus);

	/* Calibration cycle - don't allow override by mode register */
	cal = AGP_MODE_GET_CAL(tstatus);
	if (AGP_MODE_GET_CAL(mstatus) < cal)
		cal = AGP_MODE_GET_CAL(mstatus);

	/* SBA must be supported for AGP v3. */
	sba = 1;

	/* Set FW if all three support it. */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V3_RATE_8x)
		rate = AGP_MODE_V3_RATE_8x;
	else
		rate = AGP_MODE_V3_RATE_4x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);

	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_ARQSZ(command, arqsz);
	command = AGP_MODE_SET_CAL(command, cal);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_MODE_3(command, 1);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

static int
agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	    & AGP_MODE_GET_SBA(mstatus)
	    & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V2_RATE_4x)
		rate = AGP_MODE_V2_RATE_4x;
	else if (rate & AGP_MODE_V2_RATE_2x)
		rate = AGP_MODE_V2_RATE_2x;
	else
		rate = AGP_MODE_V2_RATE_1x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v2 mode %d\n", rate);

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

int
agp_generic_enable(device_t dev, u_int32_t mode)
{
	device_t mdev = agp_find_display();
	u_int32_t tstatus, mstatus;

	if (!mdev) {
		AGP_DPF("can't find display\n");
		return ENXIO;
	}

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/*
	 * Check display and bridge for AGP v3 support. AGP v3 allows
	 * more variety in topology than v2, e.g. multiple AGP devices
	 * attached to one bridge, or multiple AGP bridges in one
	 * system. This doesn't attempt to address those situations,
	 * but should work fine for a classic single AGP slot system
	 * with AGP v3.
	 */
	if (AGP_MODE_GET_MODE_3(mode) &&
	    AGP_MODE_GET_MODE_3(tstatus) &&
	    AGP_MODE_GET_MODE_3(mstatus))
		return (agp_v3_enable(dev, mdev, mode));
	else
		return (agp_v2_enable(dev, mdev, mode));
}

struct agp_memory *
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0)
		return 0;

	if (size > sc->as_maxmem - sc->as_allocated)
		return 0;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n",
		    type);
		return 0;
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
	mem->am_id = sc->as_nextid++;
	mem->am_size = size;
	mem->am_type = 0;
	mem->am_obj = vm_object_allocate(OBJT_SWAP, atop(round_page(size)));
	mem->am_physical = 0;
	mem->am_offset = 0;
	mem->am_is_bound = 0;
	TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
	sc->as_allocated += size;

	return mem;
}

int
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (mem->am_is_bound)
		return EBUSY;

	sc->as_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->as_memory, mem, am_link);
	vm_object_deallocate(mem->am_obj);
	free(mem, M_AGP);
	return 0;
}

int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
    vm_offset_t offset)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_offset_t i, j, k;
	vm_page_t m;
	int error;

	/* Do some sanity checks first. */
	if ((offset & (AGP_PAGE_SIZE - 1)) != 0 ||
	    offset + mem->am_size > AGP_GET_APERTURE(dev)) {
		device_printf(dev, "binding memory at bad offset %#x\n",
		    (int)offset);
		return EINVAL;
	}

	/*
	 * Allocate the pages early, before acquiring the lock,
	 * because vm_page_grab() may sleep and we can't hold a mutex
	 * while sleeping.
	 */
	VM_OBJECT_WLOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		/*
		 * Find a page from the object and wire it
		 * down. This page will be mapped using one or more
		 * entries in the GATT (assuming that PAGE_SIZE >=
		 * AGP_PAGE_SIZE). If this is the first call to bind,
		 * the pages will be allocated and zeroed.
		 */
		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
		    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
		AGP_DPF("found page pa=%#jx\n", (uintmax_t)VM_PAGE_TO_PHYS(m));
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	mtx_lock(&sc->as_lock);

	if (mem->am_is_bound) {
		device_printf(dev, "memory already bound\n");
		error = EINVAL;
		VM_OBJECT_WLOCK(mem->am_obj);
		i = 0;
		goto bad;
	}

	/*
	 * Bind the individual pages and flush the chipset's
	 * TLB.
	 */
	VM_OBJECT_WLOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
		    j += AGP_PAGE_SIZE) {
			vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#jx to pa %#jx\n",
			    (uintmax_t)offset + i + j, (uintmax_t)pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings
				 * and unwire the pages.
				 */
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);
				goto bad;
			}
		}
		vm_page_xunbusy(m);
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	AGP_FLUSH_TLB(dev);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	mtx_unlock(&sc->as_lock);

	return 0;
bad:
	mtx_unlock(&sc->as_lock);
	VM_OBJECT_ASSERT_WLOCKED(mem->am_obj);
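	/*
	 * Unwind: pages with index below i were already unbusied in
	 * the bind loop above, so only unbusy the rest; every page
	 * grabbed earlier must be unwired.
	 */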
	for (k = 0; k < mem->am_size; k += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
		if (k >= i)
			vm_page_xunbusy(m);
		vm_page_unwire(m, PQ_INACTIVE);
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	return error;
}

int
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_page_t m;
	int i;

	mtx_lock(&sc->as_lock);

	if (!mem->am_is_bound) {
		device_printf(dev, "memory is not bound\n");
		mtx_unlock(&sc->as_lock);
		return EINVAL;
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(dev, mem->am_offset + i);

	AGP_FLUSH_TLB(dev);

	VM_OBJECT_WLOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, atop(i));
		vm_page_unwire(m, PQ_INACTIVE);
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	mtx_unlock(&sc->as_lock);

	return 0;
}

/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state != AGP_ACQUIRE_FREE)
		return EBUSY;
	sc->as_state = state;

	return 0;
}

static int
agp_release_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state == AGP_ACQUIRE_FREE)
		return 0;

	if (sc->as_state != state)
		return EBUSY;

	sc->as_state = AGP_ACQUIRE_FREE;
	return 0;
}

static struct agp_memory *
agp_find_memory(device_t dev, int id)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return mem;
	}
	return 0;
}

/* Implementation of the userland ioctl api */
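
/*
 * A typical userland consumer (the X server, historically) drives this
 * interface roughly as follows; a sketch only, with error handling
 * omitted:
 *
 *	fd = open("/dev/agpgart", O_RDWR);
 *	ioctl(fd, AGPIOC_ACQUIRE, 0);
 *	ioctl(fd, AGPIOC_SETUP, &setup);	(choose an AGP mode)
 *	ioctl(fd, AGPIOC_ALLOCATE, &alloc);	(returns alloc.key)
 *	bind.key = alloc.key;
 *	ioctl(fd, AGPIOC_BIND, &bind);		(map it into the aperture)
 *	...
 *	ioctl(fd, AGPIOC_RELEASE, 0);
 */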

static int
agp_info_user(device_t dev, agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	bzero(info, sizeof *info);
	info->bridge_id = pci_get_devid(dev);
	info->agp_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	if (sc->as_aperture)
		info->aper_base = rman_get_start(sc->as_aperture);
	else
		info->aper_base = 0;
	info->aper_size = AGP_GET_APERTURE(dev) >> 20;
	info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
	info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

	return 0;
}

static int
agp_setup_user(device_t dev, agp_setup *setup)
{
	return AGP_ENABLE(dev, setup->agp_mode);
}

static int
agp_allocate_user(device_t dev, agp_allocate *alloc)
{
	struct agp_memory *mem;

	mem = AGP_ALLOC_MEMORY(dev,
	    alloc->type,
	    alloc->pg_count << AGP_PAGE_SHIFT);
	if (mem) {
		alloc->key = mem->am_id;
		alloc->physical = mem->am_physical;
		return 0;
	} else {
		return ENOMEM;
	}
}

static int
agp_deallocate_user(device_t dev, int id)
{
	struct agp_memory *mem = agp_find_memory(dev, id);

	if (mem) {
		AGP_FREE_MEMORY(dev, mem);
		return 0;
	} else {
		return ENOENT;
	}
}

static int
agp_bind_user(device_t dev, agp_bind *bind)
{
	struct agp_memory *mem = agp_find_memory(dev, bind->key);

	if (!mem)
		return ENOENT;

	return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(device_t dev, agp_unbind *unbind)
{
	struct agp_memory *mem = agp_find_memory(dev, unbind->key);

	if (!mem)
		return ENOENT;

	return AGP_UNBIND_MEMORY(dev, mem);
}

static int
agp_chipset_flush(device_t dev)
{

	return (AGP_CHIPSET_FLUSH(dev));
}

static int
agp_open(struct cdev *kdev, int oflags, int devtype, struct thread *td)
{
	device_t dev = kdev->si_drv1;
	struct agp_softc *sc = device_get_softc(dev);

	if (!sc->as_isopen) {
		sc->as_isopen = 1;
		device_busy(dev);
	}

	return 0;
}

static int
agp_close(struct cdev *kdev, int fflag, int devtype, struct thread *td)
{
	device_t dev = kdev->si_drv1;
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	/*
	 * Clear the GATT and force release on last close
	 */
	while ((mem = TAILQ_FIRST(&sc->as_memory)) != NULL) {
		if (mem->am_is_bound)
			AGP_UNBIND_MEMORY(dev, mem);
		AGP_FREE_MEMORY(dev, mem);
	}
	if (sc->as_state == AGP_ACQUIRE_USER)
		agp_release_helper(dev, AGP_ACQUIRE_USER);
	sc->as_isopen = 0;
	device_unbusy(dev);

	return 0;
}

static int
agp_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	device_t dev = kdev->si_drv1;

	switch (cmd) {
	case AGPIOC_INFO:
		return agp_info_user(dev, (agp_info *) data);

	case AGPIOC_ACQUIRE:
		return agp_acquire_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_RELEASE:
		return agp_release_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_SETUP:
		return agp_setup_user(dev, (agp_setup *)data);

	case AGPIOC_ALLOCATE:
		return agp_allocate_user(dev, (agp_allocate *)data);

	case AGPIOC_DEALLOCATE:
		return agp_deallocate_user(dev, *(int *) data);

	case AGPIOC_BIND:
		return agp_bind_user(dev, (agp_bind *)data);

	case AGPIOC_UNBIND:
		return agp_unbind_user(dev, (agp_unbind *)data);

	case AGPIOC_CHIPSET_FLUSH:
		return agp_chipset_flush(dev);
	}

	return EINVAL;
}

static int
agp_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
	device_t dev = kdev->si_drv1;
	struct agp_softc *sc = device_get_softc(dev);

	if (offset >= AGP_GET_APERTURE(dev))
		return -1;
	if (sc->as_aperture == NULL)
		return -1;
	*paddr = rman_get_start(sc->as_aperture) + offset;
	return 0;
}

/* Implementation of the kernel api */
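
/*
 * An in-kernel consumer (a DRM driver, say) would use this API roughly
 * as follows; a sketch only, with error handling omitted:
 *
 *	device_t agpdev = agp_find_device();
 *	agp_acquire(agpdev);
 *	agp_enable(agpdev, mode);
 *	handle = agp_alloc_memory(agpdev, 0, size);
 *	agp_bind_memory(agpdev, handle, offset);
 *	...
 *	agp_unbind_memory(agpdev, handle);
 *	agp_free_memory(agpdev, handle);
 *	agp_release(agpdev);
 */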

device_t
agp_find_device(void)
{
	device_t *children, child;
	int i, count;

	if (!agp_devclass)
		return NULL;
	if (devclass_get_devices(agp_devclass, &children, &count) != 0)
		return NULL;
	child = NULL;
	for (i = 0; i < count; i++) {
		if (device_is_attached(children[i])) {
			child = children[i];
			break;
		}
	}
	free(children, M_TEMP);
	return child;
}

enum agp_acquire_state
agp_state(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	return sc->as_state;
}

void
agp_get_info(device_t dev, struct agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	info->ai_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	if (sc->as_aperture != NULL)
		info->ai_aperture_base = rman_get_start(sc->as_aperture);
	else
		info->ai_aperture_base = 0;
	info->ai_aperture_size = AGP_GET_APERTURE(dev);
	info->ai_memory_allowed = sc->as_maxmem;
	info->ai_memory_used = sc->as_allocated;
}

int
agp_acquire(device_t dev)
{
	return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(device_t dev)
{
	return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(device_t dev, u_int32_t mode)
{
	return AGP_ENABLE(dev, mode);
}

void *agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
{
	return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
}

void agp_free_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	AGP_FREE_MEMORY(dev, mem);
}

int agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_BIND_MEMORY(dev, mem, offset);
}

int agp_unbind_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_UNBIND_MEMORY(dev, mem);
}

void agp_memory_info(device_t dev, void *handle, struct agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}

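/*
 * Bind a run of pages that the caller has already wired into the GATT
 * at the given aperture offset; the caller keeps ownership of the
 * pages.
 */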
int
agp_bind_pages(device_t dev, vm_page_t *pages, vm_size_t size,
    vm_offset_t offset)
{
	struct agp_softc *sc;
	vm_offset_t i, j, k, pa;
	vm_page_t m;
	int error;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0 ||
	    (offset & (AGP_PAGE_SIZE - 1)) != 0)
		return (EINVAL);

	sc = device_get_softc(dev);

	mtx_lock(&sc->as_lock);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = pages[OFF_TO_IDX(i)];
		KASSERT(vm_page_wired(m),
		    ("agp_bind_pages: page %p hasn't been wired", m));

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < size; j += AGP_PAGE_SIZE) {
			pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#jx to pa %#jx\n",
			    (uintmax_t)offset + i + j, (uintmax_t)pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings.
				 */
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);

				mtx_unlock(&sc->as_lock);
				return (error);
			}
		}
	}

	AGP_FLUSH_TLB(dev);

	mtx_unlock(&sc->as_lock);
	return (0);
}

int
agp_unbind_pages(device_t dev, vm_size_t size, vm_offset_t offset)
{
	struct agp_softc *sc;
	vm_offset_t i;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0 ||
	    (offset & (AGP_PAGE_SIZE - 1)) != 0)
		return (EINVAL);

	sc = device_get_softc(dev);

	mtx_lock(&sc->as_lock);
	for (i = 0; i < size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(dev, offset + i);

	AGP_FLUSH_TLB(dev);

	mtx_unlock(&sc->as_lock);
	return (0);
}