/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_agp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/agpio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>

#include <dev/agp/agppriv.h>
#include <dev/agp/agpvar.h>
#include <dev/agp/agpreg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_radix.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

MODULE_VERSION(agp, 1);

MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");

/* agp_drv.c */
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

static struct cdevsw agp_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	agp_open,
	.d_close =	agp_close,
	.d_ioctl =	agp_ioctl,
	.d_mmap =	agp_mmap,
	.d_name =	"agp",
};

static devclass_t agp_devclass;

/* Helper functions for implementing chipset mini drivers. */

u_int8_t
agp_find_caps(device_t dev)
{
	int capreg;

	if (pci_find_cap(dev, PCIY_AGP, &capreg) != 0)
		capreg = 0;
	return (capreg);
}
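
/*
 * Illustrative note: the returned offset locates the AGP capability
 * block in PCI config space, so the AGP registers are addressed
 * relative to it, as is done throughout this file, e.g.:
 *
 *	status = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
 */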

/*
 * Find an AGP display device (if any).
 */
static device_t
agp_find_display(void)
{
	devclass_t pci = devclass_find("pci");
	device_t bus, dev = 0;
	device_t *kids;
	int busnum, numkids, i;

	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		if (device_get_children(bus, &kids, &numkids) != 0)
			continue;
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_DISPLAY
			    && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
				if (agp_find_caps(dev)) {
					free(kids, M_TEMP);
					return dev;
				}
		}
		free(kids, M_TEMP);
	}

	return 0;
}

struct agp_gatt *
agp_alloc_gatt(device_t dev)
{
	u_int32_t apsize = AGP_GET_APERTURE(dev);
	u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
	struct agp_gatt *gatt;

	if (bootverbose)
		device_printf(dev,
		    "allocating GATT for aperture of size %dM\n",
		    apsize / (1024*1024));

	if (entries == 0) {
		device_printf(dev, "bad aperture size\n");
		return NULL;
	}

	gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
	if (!gatt)
		return 0;

	gatt->ag_entries = entries;
	gatt->ag_virtual = kmem_alloc_contig(entries * sizeof(uint32_t),
	    M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING);
	if (!gatt->ag_virtual) {
		if (bootverbose)
			device_printf(dev, "contiguous allocation failed\n");
		free(gatt, M_AGP);
		return 0;
	}
	gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);

	return gatt;
}
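
/*
 * Illustrative sketch (not part of this file): a chipset minidriver
 * typically allocates the GATT during attach and programs the chipset
 * with its physical address; the softc field name below is
 * hypothetical -- real minidrivers keep the pointer in their own
 * softc:
 *
 *	sc->gatt = agp_alloc_gatt(dev);
 *	if (sc->gatt == NULL)
 *		return ENOMEM;
 *	... write sc->gatt->ag_physical into the chipset's GATT
 *	... base register so the hardware can translate aperture
 *	... accesses through ag_virtual[] entries.
 */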

void
agp_free_gatt(struct agp_gatt *gatt)
{
	kmem_free(gatt->ag_virtual, gatt->ag_entries * sizeof(uint32_t));
	free(gatt, M_AGP);
}

static u_int agp_max[][2] = {
	{0,	0},
	{32,	4},
	{64,	28},
	{128,	96},
	{256,	204},
	{512,	440},
	{1024,	942},
	{2048,	1920},
	{4096,	3932}
};
#define	AGP_MAX_SIZE	nitems(agp_max)
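
/*
 * Worked example (illustrative): agp_max maps system memory in MB
 * (left column) to the maximum memory allowed for AGP in MB (right
 * column).  The lookup loop in agp_generic_attach() stops at the
 * first row whose left column is >= the system memory size, so a
 * machine with 96MB of RAM matches {128, 96} and may allocate up to
 * 96MB, while anything beyond 4096MB runs off the end of the table
 * and is clamped to the last row, allowing 3932MB.
 */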

/**
 * Sets the PCI resource which represents the AGP aperture.
 *
 * If not called, the default AGP aperture resource of AGP_APBASE will
 * be used.  Must be called before agp_generic_attach().
 */
void
agp_set_aperture_resource(device_t dev, int rid)
{
	struct agp_softc *sc = device_get_softc(dev);

	sc->as_aperture_rid = rid;
}

int
agp_generic_attach(device_t dev)
{
	struct make_dev_args mdargs;
	struct agp_softc *sc = device_get_softc(dev);
	int error, i, unit;
	u_int memsize;

	/*
	 * Find and map the aperture, RF_SHAREABLE for DRM but not RF_ACTIVE
	 * because the kernel doesn't need to map it.
	 */

	if (sc->as_aperture_rid != -1) {
		if (sc->as_aperture_rid == 0)
			sc->as_aperture_rid = AGP_APBASE;

		sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->as_aperture_rid, RF_SHAREABLE);
		if (!sc->as_aperture)
			return ENOMEM;
	}

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(realmem) >> 20;
	for (i = 0; i < AGP_MAX_SIZE; i++) {
		if (memsize <= agp_max[i][0])
			break;
	}
	if (i == AGP_MAX_SIZE)
		i = AGP_MAX_SIZE - 1;
	sc->as_maxmem = agp_max[i][1] << 20U;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	mtx_init(&sc->as_lock, "agp lock", NULL, MTX_DEF);

	/*
	 * Initialise stuff for the userland device.
	 */
	agp_devclass = devclass_find("agp");
	TAILQ_INIT(&sc->as_memory);
	sc->as_nextid = 1;

	sc->as_devalias = NULL;

	make_dev_args_init(&mdargs);
	mdargs.mda_devsw = &agp_cdevsw;
	mdargs.mda_uid = UID_ROOT;
	mdargs.mda_gid = GID_WHEEL;
	mdargs.mda_mode = 0600;
	mdargs.mda_si_drv1 = dev;
	mdargs.mda_si_drv2 = NULL;

	unit = device_get_unit(dev);
	error = make_dev_s(&mdargs, &sc->as_devnode, "agpgart%d", unit);
	if (error == 0) {
		/*
		 * Create an alias for the first device that shows up.
		 */
		if (unit == 0) {
			(void)make_dev_alias_p(MAKEDEV_CHECKNAME,
			    &sc->as_devalias, sc->as_devnode, "agpgart");
		}
	} else {
		agp_free_res(dev);
	}

	return error;
}

void
agp_free_cdev(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	destroy_dev(sc->as_devnode);
	if (sc->as_devalias != NULL)
		destroy_dev(sc->as_devalias);
}

void
agp_free_res(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_aperture != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->as_aperture_rid,
		    sc->as_aperture);
	mtx_destroy(&sc->as_lock);
}

int
agp_generic_detach(device_t dev)
{

	agp_free_cdev(dev);
	agp_free_res(dev);
	return 0;
}

/**
 * Default AGP aperture size detection which simply returns the size of
 * the aperture's PCI resource.
 */
u_int32_t
agp_generic_get_aperture(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	return rman_get_size(sc->as_aperture);
}

/**
 * Default AGP aperture size setting function, which simply doesn't allow
 * changes to resource size.
 */
int
agp_generic_set_aperture(device_t dev, u_int32_t aperture)
{
	u_int32_t current_aperture;

	current_aperture = AGP_GET_APERTURE(dev);
	if (current_aperture != aperture)
		return EINVAL;
	else
		return 0;
}

/*
 * This does the enable logic for v3, with the same topology
 * restrictions as in place for v2 -- one bus, one device on the bus.
 */
static int
agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate, arqsz, cal;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/*
	 * ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values.
	 */
	arqsz = AGP_MODE_GET_ARQSZ(mode);
	if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(tstatus);
	if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(mstatus);

	/* Calibration cycle - don't allow override by mode register */
	cal = AGP_MODE_GET_CAL(tstatus);
	if (AGP_MODE_GET_CAL(mstatus) < cal)
		cal = AGP_MODE_GET_CAL(mstatus);

	/* SBA must be supported for AGP v3. */
	sba = 1;

	/* Set FW if all three support it. */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V3_RATE_8x)
		rate = AGP_MODE_V3_RATE_8x;
	else
		rate = AGP_MODE_V3_RATE_4x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);

	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);

	/* Construct the new mode word and tell the hardware */
	command = 0;
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_ARQSZ(command, arqsz);
	command = AGP_MODE_SET_CAL(command, cal);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_MODE_3(command, 1);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

static int
agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	    & AGP_MODE_GET_SBA(mstatus)
	    & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V2_RATE_4x)
		rate = AGP_MODE_V2_RATE_4x;
	else if (rate & AGP_MODE_V2_RATE_2x)
		rate = AGP_MODE_V2_RATE_2x;
	else
		rate = AGP_MODE_V2_RATE_1x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v2 mode %d\n", rate);

	/* Construct the new mode word and tell the hardware */
	command = 0;
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

int
agp_generic_enable(device_t dev, u_int32_t mode)
{
	device_t mdev = agp_find_display();
	u_int32_t tstatus, mstatus;

	if (!mdev) {
		AGP_DPF("can't find display\n");
		return ENXIO;
	}

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/*
	 * Check display and bridge for AGP v3 support.  AGP v3 allows
	 * more variety in topology than v2, e.g. multiple AGP devices
	 * attached to one bridge, or multiple AGP bridges in one
	 * system.  This doesn't attempt to address those situations,
	 * but should work fine for a classic single AGP slot system
	 * with AGP v3.
	 */
	if (AGP_MODE_GET_MODE_3(mode) &&
	    AGP_MODE_GET_MODE_3(tstatus) &&
	    AGP_MODE_GET_MODE_3(mstatus))
		return (agp_v3_enable(dev, mdev, mode));
	else
		return (agp_v2_enable(dev, mdev, mode));
}

struct agp_memory *
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0)
		return 0;

	if (size > sc->as_maxmem - sc->as_allocated)
		return 0;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n",
		    type);
		return 0;
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
	mem->am_id = sc->as_nextid++;
	mem->am_size = size;
	mem->am_type = 0;
	mem->am_obj = vm_object_allocate(OBJT_SWAP, atop(round_page(size)));
	mem->am_physical = 0;
	mem->am_offset = 0;
	mem->am_is_bound = 0;
	TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
	sc->as_allocated += size;

	return mem;
}

int
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (mem->am_is_bound)
		return EBUSY;

	sc->as_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->as_memory, mem, am_link);
	vm_object_deallocate(mem->am_obj);
	free(mem, M_AGP);
	return 0;
}

int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
    vm_offset_t offset)
{
	struct pctrie_iter pages;
	struct agp_softc *sc = device_get_softc(dev);
	vm_offset_t i, j, k;
	vm_page_t m;
	int error;

	/* Do some sanity checks first. */
	if ((offset & (AGP_PAGE_SIZE - 1)) != 0 ||
	    offset + mem->am_size > AGP_GET_APERTURE(dev)) {
		device_printf(dev, "binding memory at bad offset %#x\n",
		    (int)offset);
		return EINVAL;
	}

	/*
	 * Allocate the pages early, before acquiring the lock,
	 * because vm_page_grab() may sleep and we can't hold a mutex
	 * while sleeping.
	 */
	VM_OBJECT_WLOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		/*
		 * Find a page from the object and wire it
		 * down. This page will be mapped using one or more
		 * entries in the GATT (assuming that PAGE_SIZE >=
		 * AGP_PAGE_SIZE). If this is the first call to bind,
		 * the pages will be allocated and zeroed.
		 */
		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
		    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
		AGP_DPF("found page pa=%#jx\n", (uintmax_t)VM_PAGE_TO_PHYS(m));
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);
	vm_page_iter_init(&pages, mem->am_obj);
	mtx_lock(&sc->as_lock);

	if (mem->am_is_bound) {
		device_printf(dev, "memory already bound\n");
		error = EINVAL;
		VM_OBJECT_WLOCK(mem->am_obj);
		i = 0;
		goto bad;
	}

	/*
	 * Bind the individual pages and flush the chipset's
	 * TLB.
	 */
	VM_OBJECT_WLOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_radix_iter_lookup(&pages, OFF_TO_IDX(i));

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
		    j += AGP_PAGE_SIZE) {
			vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#jx to pa %#jx\n",
			    (uintmax_t)offset + i + j, (uintmax_t)pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings
				 * and unwire the pages.
				 */
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);
				goto bad;
			}
		}
		vm_page_xunbusy(m);
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	AGP_FLUSH_TLB(dev);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	mtx_unlock(&sc->as_lock);

	return 0;
bad:
	mtx_unlock(&sc->as_lock);
	VM_OBJECT_ASSERT_WLOCKED(mem->am_obj);
	for (k = 0; k < mem->am_size; k += PAGE_SIZE) {
		m = vm_radix_iter_lookup(&pages, OFF_TO_IDX(k));
		if (k >= i)
			vm_page_xunbusy(m);
		vm_page_unwire(m, PQ_INACTIVE);
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	return error;
}

int
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
{
	struct pctrie_iter pages;
	struct agp_softc *sc = device_get_softc(dev);
	vm_page_t m;
	int i;

	mtx_lock(&sc->as_lock);

	if (!mem->am_is_bound) {
		device_printf(dev, "memory is not bound\n");
		mtx_unlock(&sc->as_lock);
		return EINVAL;
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(dev, mem->am_offset + i);

	AGP_FLUSH_TLB(dev);

	vm_page_iter_init(&pages, mem->am_obj);
	VM_OBJECT_WLOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_radix_iter_lookup(&pages, atop(i));
		vm_page_unwire(m, PQ_INACTIVE);
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	mtx_unlock(&sc->as_lock);

	return 0;
}

/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state != AGP_ACQUIRE_FREE)
		return EBUSY;
	sc->as_state = state;

	return 0;
}

static int
agp_release_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state == AGP_ACQUIRE_FREE)
		return 0;

	if (sc->as_state != state)
		return EBUSY;

	sc->as_state = AGP_ACQUIRE_FREE;
	return 0;
}

static struct agp_memory *
agp_find_memory(device_t dev, int id)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return mem;
	}
	return 0;
}

/* Implementation of the userland ioctl api */

static int
agp_info_user(device_t dev, agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	bzero(info, sizeof *info);
	info->bridge_id = pci_get_devid(dev);
	info->agp_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	if (sc->as_aperture)
		info->aper_base = rman_get_start(sc->as_aperture);
	else
		info->aper_base = 0;
	info->aper_size = AGP_GET_APERTURE(dev) >> 20;
	info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
	info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

	return 0;
}

static int
agp_setup_user(device_t dev, agp_setup *setup)
{
	return AGP_ENABLE(dev, setup->agp_mode);
}

static int
agp_allocate_user(device_t dev, agp_allocate *alloc)
{
	struct agp_memory *mem;

	mem = AGP_ALLOC_MEMORY(dev,
	    alloc->type,
	    alloc->pg_count << AGP_PAGE_SHIFT);
	if (mem) {
		alloc->key = mem->am_id;
		alloc->physical = mem->am_physical;
		return 0;
	} else {
		return ENOMEM;
	}
}

static int
agp_deallocate_user(device_t dev, int id)
{
	struct agp_memory *mem = agp_find_memory(dev, id);

	if (mem) {
		AGP_FREE_MEMORY(dev, mem);
		return 0;
	} else {
		return ENOENT;
	}
}

static int
agp_bind_user(device_t dev, agp_bind *bind)
{
	struct agp_memory *mem = agp_find_memory(dev, bind->key);

	if (!mem)
		return ENOENT;

	return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(device_t dev, agp_unbind *unbind)
{
	struct agp_memory *mem = agp_find_memory(dev, unbind->key);

	if (!mem)
		return ENOENT;

	return AGP_UNBIND_MEMORY(dev, mem);
}

static int
agp_chipset_flush(device_t dev)
{

	return (AGP_CHIPSET_FLUSH(dev));
}

static int
agp_open(struct cdev *kdev, int oflags, int devtype, struct thread *td)
{
	device_t dev = kdev->si_drv1;
	struct agp_softc *sc = device_get_softc(dev);

	if (!sc->as_isopen) {
		sc->as_isopen = 1;
		device_busy(dev);
	}

	return 0;
}

static int
agp_close(struct cdev *kdev, int fflag, int devtype, struct thread *td)
{
	device_t dev = kdev->si_drv1;
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	/*
	 * Clear the GATT and force release on last close
	 */
	while ((mem = TAILQ_FIRST(&sc->as_memory)) != NULL) {
		if (mem->am_is_bound)
			AGP_UNBIND_MEMORY(dev, mem);
		AGP_FREE_MEMORY(dev, mem);
	}
	if (sc->as_state == AGP_ACQUIRE_USER)
		agp_release_helper(dev, AGP_ACQUIRE_USER);
	sc->as_isopen = 0;
	device_unbusy(dev);

	return 0;
}

static int
agp_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	device_t dev = kdev->si_drv1;

	switch (cmd) {
	case AGPIOC_INFO:
		return agp_info_user(dev, (agp_info *) data);

	case AGPIOC_ACQUIRE:
		return agp_acquire_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_RELEASE:
		return agp_release_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_SETUP:
		return agp_setup_user(dev, (agp_setup *)data);

	case AGPIOC_ALLOCATE:
		return agp_allocate_user(dev, (agp_allocate *)data);

	case AGPIOC_DEALLOCATE:
		return agp_deallocate_user(dev, *(int *) data);

	case AGPIOC_BIND:
		return agp_bind_user(dev, (agp_bind *)data);

	case AGPIOC_UNBIND:
		return agp_unbind_user(dev, (agp_unbind *)data);

	case AGPIOC_CHIPSET_FLUSH:
		return agp_chipset_flush(dev);
	}

	return EINVAL;
}

static int
agp_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
	device_t dev = kdev->si_drv1;
	struct agp_softc *sc = device_get_softc(dev);

	if (offset > AGP_GET_APERTURE(dev))
		return -1;
	if (sc->as_aperture == NULL)
		return -1;
	*paddr = rman_get_start(sc->as_aperture) + offset;
	return 0;
}
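
/*
 * Illustrative userland sketch (not part of this file): the ioctl and
 * mmap entry points above are consumed roughly as follows, assuming
 * the /dev/agpgart alias created in agp_generic_attach().  Note that
 * agp_info_user() reports aper_size in MB, hence the shift:
 *
 *	int fd = open("/dev/agpgart", O_RDWR);
 *	agp_info info;
 *	ioctl(fd, AGPIOC_ACQUIRE, 0);
 *	ioctl(fd, AGPIOC_INFO, &info);
 *	void *aper = mmap(NULL, (size_t)info.aper_size << 20,
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	... allocate/bind with AGPIOC_ALLOCATE and AGPIOC_BIND ...
 *	ioctl(fd, AGPIOC_RELEASE, 0);
 *	close(fd);
 */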

/* Implementation of the kernel api */

device_t
agp_find_device(void)
{
	device_t *children, child;
	int i, count;

	if (!agp_devclass)
		return NULL;
	if (devclass_get_devices(agp_devclass, &children, &count) != 0)
		return NULL;
	child = NULL;
	for (i = 0; i < count; i++) {
		if (device_is_attached(children[i])) {
			child = children[i];
			break;
		}
	}
	free(children, M_TEMP);
	return child;
}

enum agp_acquire_state
agp_state(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	return sc->as_state;
}

void
agp_get_info(device_t dev, struct agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	info->ai_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	if (sc->as_aperture != NULL)
		info->ai_aperture_base = rman_get_start(sc->as_aperture);
	else
		info->ai_aperture_base = 0;
	info->ai_aperture_size = AGP_GET_APERTURE(dev);
	info->ai_memory_allowed = sc->as_maxmem;
	info->ai_memory_used = sc->as_allocated;
}

int
agp_acquire(device_t dev)
{
	return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(device_t dev)
{
	return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(device_t dev, u_int32_t mode)
{
	return AGP_ENABLE(dev, mode);
}

void *agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
{
	return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
}

void agp_free_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	AGP_FREE_MEMORY(dev, mem);
}

int agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_BIND_MEMORY(dev, mem, offset);
}

int agp_unbind_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_UNBIND_MEMORY(dev, mem);
}

void agp_memory_info(device_t dev, void *handle,
    struct agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}
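
/*
 * Illustrative kernel-side sketch (conventional usage, not mandated by
 * this file): an in-kernel client such as a DRM driver drives the api
 * above roughly like this, with "mode", "size" and "offset" chosen by
 * the client:
 *
 *	device_t agpdev = agp_find_device();
 *	if (agpdev == NULL || agp_acquire(agpdev) != 0)
 *		return ENXIO;
 *	agp_enable(agpdev, mode);
 *	void *handle = agp_alloc_memory(agpdev, 0, size);
 *	agp_bind_memory(agpdev, handle, offset);
 *	... use the aperture ...
 *	agp_unbind_memory(agpdev, handle);
 *	agp_free_memory(agpdev, handle);
 *	agp_release(agpdev);
 */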

int
agp_bind_pages(device_t dev, vm_page_t *pages, vm_size_t size,
    vm_offset_t offset)
{
	struct agp_softc *sc;
	vm_offset_t i, j, k, pa;
	vm_page_t m;
	int error;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0 ||
	    (offset & (AGP_PAGE_SIZE - 1)) != 0)
		return (EINVAL);

	sc = device_get_softc(dev);

	mtx_lock(&sc->as_lock);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = pages[OFF_TO_IDX(i)];
		KASSERT(vm_page_wired(m),
		    ("agp_bind_pages: page %p hasn't been wired", m));

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < size; j += AGP_PAGE_SIZE) {
			pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#jx to pa %#jx\n",
			    (uintmax_t)offset + i + j, (uintmax_t)pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings.
				 */
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);

				mtx_unlock(&sc->as_lock);
				return (error);
			}
		}
	}

	AGP_FLUSH_TLB(dev);

	mtx_unlock(&sc->as_lock);
	return (0);
}

int
agp_unbind_pages(device_t dev, vm_size_t size, vm_offset_t offset)
{
	struct agp_softc *sc;
	vm_offset_t i;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0 ||
	    (offset & (AGP_PAGE_SIZE - 1)) != 0)
		return (EINVAL);

	sc = device_get_softc(dev);

	mtx_lock(&sc->as_lock);
	for (i = 0; i < size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(dev, offset + i);

	AGP_FLUSH_TLB(dev);

	mtx_unlock(&sc->as_lock);
	return (0);
}