xref: /freebsd/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c (revision 43a5ec4eb41567cc92586503212743d89686d78f)
/**
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <interface/compat/vchi_bsd.h>

#include <sys/malloc.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <arm/broadcom/bcm2835/bcm2835_mbox.h>
#include <arm/broadcom/bcm2835/bcm2835_vcbus.h>

MALLOC_DEFINE(M_VCPAGELIST, "vcpagelist", "VideoCore pagelist memory");

#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)

#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0
#define VCHIQ_ARM_ADDRESS(x) ((void *)PHYS_TO_VCBUS(pmap_kextract((vm_offset_t)(x))))

#include "vchiq_arm.h"
#include "vchiq_2835.h"
#include "vchiq_connected.h"
#include "vchiq_killable.h"

#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)

int g_cache_line_size = 32;
static int g_fragment_size;

typedef struct vchiq_2835_state_struct {
	int inited;
	VCHIQ_ARM_STATE_T arm_state;
} VCHIQ_2835_ARM_STATE_T;

static char *g_slot_mem;
static int g_slot_mem_size;
vm_paddr_t g_slot_phys;
/* BSD DMA */
bus_dma_tag_t bcm_slots_dma_tag;
bus_dmamap_t bcm_slots_dma_map;

static char *g_fragments_base;
static char *g_free_fragments;
struct semaphore g_free_fragments_sema;

static DEFINE_SEMAPHORE(g_free_fragments_mutex);

typedef struct bulkinfo_struct {
	PAGELIST_T	*pagelist;
	bus_dma_tag_t	pagelist_dma_tag;
	bus_dmamap_t	pagelist_dma_map;
	void		*buf;
	size_t		size;
} BULKINFO_T;

static int
create_pagelist(char __user *buf, size_t count, unsigned short type,
		struct proc *p, BULKINFO_T *bi);

static void
free_pagelist(BULKINFO_T *bi, int actual);

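/*
 * busdma load callback: translate the single physical segment into a
 * VideoCore bus address and hand it back through the caller's pointer.
 */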
static void
vchiq_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	bus_addr_t *addr;

	if (err)
		return;

	addr = (bus_addr_t *)arg;
	*addr = PHYS_TO_VCBUS(segs[0].ds_addr);
}

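/*
 * Copy "size" bytes from a kernel buffer into a held physical page at the
 * given offset, using a temporary CPU mapping of the page.
 */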
static int
copyout_page(vm_page_t p, size_t offset, void *kaddr, size_t size)
{
	uint8_t *dst;

	dst = (uint8_t *)pmap_quick_enter_page(p);
	if (!dst)
		return ENOMEM;

	memcpy(dst + offset, kaddr, size);

	pmap_quick_remove_page((vm_offset_t)dst);

	return 0;
}

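/*
 * Platform initialisation: carve out a physically contiguous, DMA-coherent
 * region for the VCHIQ slots plus the bulk-transfer fragment pool, set up
 * the shared slot data, and tell the VideoCore where it lives via the
 * VCHIQ mailbox channel.
 */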
int __init
vchiq_platform_init(VCHIQ_STATE_T *state)
{
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	int frag_mem_size;
	int err;
	int i;

	/* Allocate space for the channels in coherent memory */
	g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	g_fragment_size = 2 * g_cache_line_size;
	frag_mem_size = PAGE_ALIGN(g_fragment_size * MAX_FRAGMENTS);

	err = bus_dma_tag_create(
	    NULL,				/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    g_slot_mem_size + frag_mem_size, 1,	/* maxsize, nsegments */
	    g_slot_mem_size + frag_mem_size, 0,	/* maxsegsize, flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &bcm_slots_dma_tag);
	if (err) {
		vchiq_log_error(vchiq_core_log_level, "Unable to create DMA tag");
		return -ENOMEM;
	}

	err = bus_dmamem_alloc(bcm_slots_dma_tag, (void **)&g_slot_mem,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK, &bcm_slots_dma_map);
	if (err) {
		vchiq_log_error(vchiq_core_log_level, "Unable to allocate channel memory");
		err = -ENOMEM;
		goto failed_alloc;
	}

	err = bus_dmamap_load(bcm_slots_dma_tag, bcm_slots_dma_map, g_slot_mem,
	    g_slot_mem_size + frag_mem_size, vchiq_dmamap_cb,
	    &g_slot_phys, 0);

	if (err) {
		vchiq_log_error(vchiq_core_log_level, "cannot load DMA map");
		err = -ENOMEM;
		goto failed_load;
	}

	WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
	if (!vchiq_slot_zero) {
		err = -EINVAL;
		goto failed_init_slots;
	}

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)g_slot_phys + g_slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)(g_slot_mem + g_slot_mem_size);
	g_slot_mem_size += frag_mem_size;

	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i * g_fragment_size] =
			&g_fragments_base[(i + 1) * g_fragment_size];
	}
	*(char **)&g_fragments_base[i * g_fragment_size] = NULL;
	_sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
		VCHIQ_SUCCESS) {
		err = -EINVAL;
		goto failed_vchiq_init;
	}

	bcm_mbox_write(BCM2835_MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq_init - done (slots %x, phys %x)",
		(unsigned int)vchiq_slot_zero, g_slot_phys);

	vchiq_call_connected_callbacks();

	return 0;

failed_vchiq_init:
failed_init_slots:
	bus_dmamap_unload(bcm_slots_dma_tag, bcm_slots_dma_map);
failed_load:
	bus_dmamem_free(bcm_slots_dma_tag, g_slot_mem, bcm_slots_dma_map);
failed_alloc:
	bus_dma_tag_destroy(bcm_slots_dma_tag);

	return err;
}

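/*
 * Platform teardown: release the DMA map, the coherent slot/fragment
 * memory, and the slot DMA tag created in vchiq_platform_init().
 */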
void __exit
vchiq_platform_exit(VCHIQ_STATE_T *state)
{

	bus_dmamap_unload(bcm_slots_dma_tag, bcm_slots_dma_map);
	bus_dmamem_free(bcm_slots_dma_tag, g_slot_mem, bcm_slots_dma_map);
	bus_dma_tag_destroy(bcm_slots_dma_tag);
}

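/*
 * Allocate and initialise the per-state platform data, including the
 * embedded VCHIQ_ARM_STATE_T used by the common ARM support code.
 */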
VCHIQ_STATUS_T
vchiq_platform_init_state(VCHIQ_STATE_T *state)
{
	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;

	state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T),
	    GFP_KERNEL);
	((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 1;
	status = vchiq_arm_init_state(state,
	    &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state);
	if (status != VCHIQ_SUCCESS)
		((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 0;

	return status;
}

VCHIQ_ARM_STATE_T *
vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
{
	if (!((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited)
		BUG();

	return &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state;
}

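/*
 * Copy "size" bytes into "dst" from either a user-space or a kernel source
 * address; user addresses are detected by comparing against
 * VM_MIN_KERNEL_ADDRESS and copied with copyin().
 */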
int
vchiq_copy_from_user(void *dst, const void *src, int size)
{

	if (((vm_offset_t)(src)) < VM_MIN_KERNEL_ADDRESS) {
		int error = copyin(src, dst, size);
		return error ? VCHIQ_ERROR : VCHIQ_SUCCESS;
	} else
		bcopy(src, dst, size);

	return 0;
}

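/*
 * Pin the buffer for a bulk transfer, build a pagelist describing it, and
 * give the bulk the VideoCore bus address of that pagelist. The BULKINFO_T
 * tracking structure is stashed in remote_data for vchiq_complete_bulk()
 * to clean up later.
 */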
VCHIQ_STATUS_T
vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
	void *offset, int size, int dir)
{
	BULKINFO_T *bi;
	int ret;

	WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
	bi = malloc(sizeof(*bi), M_VCPAGELIST, M_WAITOK | M_ZERO);
	if (bi == NULL)
		return VCHIQ_ERROR;

	ret = create_pagelist((char __user *)offset, size,
			(dir == VCHIQ_BULK_RECEIVE)
			? PAGELIST_READ
			: PAGELIST_WRITE,
			current,
			bi);
	if (ret != 0) {
		free(bi, M_VCPAGELIST);
		return VCHIQ_ERROR;
	}

	bulk->handle = memhandle;
	bulk->data = VCHIQ_ARM_ADDRESS(bi->pagelist);

	/*
	 * Store the pagelist address in remote_data, which isn't used by the
	 * slave.
	 */
	bulk->remote_data = bi;

	return VCHIQ_SUCCESS;
}

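/*
 * Completion hook for bulk transfers: releases the pagelist (and any
 * partial cache-line fragments) built by vchiq_prepare_bulk_data().
 */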
void
vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
{
	if (bulk && bulk->remote_data && bulk->actual)
		free_pagelist((BULKINFO_T *)bulk->remote_data, bulk->actual);
}

void
vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
{
	/*
	 * This should only be called on the master (VideoCore) side, but
	 * provide an implementation to avoid the need for ifdefery.
	 */
	BUG();
}

void
vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		"  Platform: 2835 (VC master)");
	vchiq_dump(dump_context, buf, len + 1);
}

VCHIQ_STATUS_T
vchiq_platform_suspend(VCHIQ_STATE_T *state)
{
	return VCHIQ_ERROR;
}

VCHIQ_STATUS_T
vchiq_platform_resume(VCHIQ_STATE_T *state)
{
	return VCHIQ_SUCCESS;
}

void
vchiq_platform_paused(VCHIQ_STATE_T *state)
{
}

void
vchiq_platform_resumed(VCHIQ_STATE_T *state)
{
}

int
vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state)
{
	return 1; /* autosuspend not supported - videocore always wanted */
}

int
vchiq_platform_use_suspend_timer(void)
{
	return 0;
}

void
vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
{
	vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
}

void
vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
{
	(void)state;
}

/*
 * Local functions
 */

static void
pagelist_page_free(vm_page_t pp)
{
	vm_page_unwire(pp, PQ_INACTIVE);
}

/* There is a potential problem with partial cache lines (pages?)
** at the ends of the block when reading. If the CPU accessed anything in
** the same line (page?) then it may have pulled old data into the cache,
** obscuring the new data underneath. We can solve this by transferring the
** partial cache lines separately, and allowing the ARM to copy into the
** cached area.

** N.B. This implementation plays slightly fast and loose with the Linux
** driver programming rules, e.g. its use of __virt_to_bus instead of
** dma_map_single, but it isn't a multi-platform driver and it benefits
** from increased speed as a result.
*/

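/*
 * Build a PAGELIST_T in DMA-coherent memory describing "count" bytes of
 * the caller's buffer: hold the backing pages, group them into runs of
 * contiguous VideoCore bus addresses, reserve a fragment buffer for any
 * partial cache lines at the ends of a read, and write the buffer's cache
 * lines back so the VideoCore sees consistent data.
 */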
static int
create_pagelist(char __user *buf, size_t count, unsigned short type,
	struct proc *p, BULKINFO_T *bi)
{
	PAGELIST_T *pagelist;
	vm_page_t *pages;
	unsigned long *addrs;
	unsigned int num_pages, i;
	vm_offset_t offset;
	int pagelist_size;
	char *addr, *base_addr, *next_addr;
	int run, addridx, actual_pages;
	int err;
	vm_paddr_t pagelist_phys;
	vm_paddr_t pa;

	offset = (vm_offset_t)buf & (PAGE_SIZE - 1);
	num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;

	bi->pagelist = NULL;
	bi->buf = buf;
	bi->size = count;

	/* Allocate enough storage to hold the page pointers and the page
	** list
	*/
	pagelist_size = sizeof(PAGELIST_T) +
		(num_pages * sizeof(unsigned long)) +
		(num_pages * sizeof(pages[0]));

	err = bus_dma_tag_create(
	    NULL,				/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    pagelist_size, 1,			/* maxsize, nsegments */
	    pagelist_size, 0,			/* maxsegsize, flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &bi->pagelist_dma_tag);
	if (err) {
		vchiq_log_error(vchiq_core_log_level, "Unable to create pagelist DMA tag");
		return -ENOMEM;
	}

	err = bus_dmamem_alloc(bi->pagelist_dma_tag, (void **)&pagelist,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK, &bi->pagelist_dma_map);
	if (err) {
		vchiq_log_error(vchiq_core_log_level, "Unable to allocate pagelist memory");
		err = -ENOMEM;
		goto failed_alloc;
	}

	err = bus_dmamap_load(bi->pagelist_dma_tag, bi->pagelist_dma_map, pagelist,
	    pagelist_size, vchiq_dmamap_cb,
	    &pagelist_phys, 0);

	if (err) {
		vchiq_log_error(vchiq_core_log_level, "cannot load DMA map for pagelist memory");
		err = -ENOMEM;
		goto failed_load;
	}

	vchiq_log_trace(vchiq_arm_log_level,
		"create_pagelist - %x (%d bytes @%p)", (unsigned int)pagelist, count, buf);

	addrs = pagelist->addrs;
	pages = (vm_page_t *)(addrs + num_pages);

	/* Wire the pages backing the caller's buffer */
	actual_pages = vm_fault_quick_hold_pages(&p->p_vmspace->vm_map,
	    (vm_offset_t)buf, count,
	    (type == PAGELIST_READ ? VM_PROT_WRITE : 0) | VM_PROT_READ, pages, num_pages);

	if (actual_pages != num_pages) {
		vm_page_unhold_pages(pages, actual_pages);
		err = -ENOMEM;
		goto failed_hold;
	}

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Group the pages into runs of contiguous pages */

	base_addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[0]));
	next_addr = base_addr + PAGE_SIZE;
	addridx = 0;
	run = 0;

	for (i = 1; i < num_pages; i++) {
		addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[i]));
		if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
			next_addr += PAGE_SIZE;
			run++;
		} else {
			addrs[addridx] = (unsigned long)base_addr + run;
			addridx++;
			base_addr = addr;
			next_addr = addr + PAGE_SIZE;
			run = 0;
		}
	}

	addrs[addridx] = (unsigned long)base_addr + run;
	addridx++;

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
		((pagelist->offset & (g_cache_line_size - 1)) ||
		((pagelist->offset + pagelist->length) &
		(g_cache_line_size - 1)))) {
		char *fragments;

		if (down_interruptible(&g_free_fragments_sema) != 0) {
			vm_page_unhold_pages(pages, num_pages);
			err = -EINTR;
			goto failed_hold;
		}

		WARN_ON(g_free_fragments == NULL);

		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(fragments == NULL);
		g_free_fragments = *(char **)g_free_fragments;
		up(&g_free_fragments_mutex);
		pagelist->type =
			PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragment_size;
	}

	/* Write the buffer back (and invalidate) so the VC sees current data */
	pa = pmap_extract(PCPU_GET(curpmap), (vm_offset_t)buf);
	dcache_wbinv_poc((vm_offset_t)buf, pa, count);

	bus_dmamap_sync(bi->pagelist_dma_tag, bi->pagelist_dma_map, BUS_DMASYNC_PREWRITE);

	bi->pagelist = pagelist;

	return 0;

failed_hold:
	bus_dmamap_unload(bi->pagelist_dma_tag, bi->pagelist_dma_map);
failed_load:
	bus_dmamem_free(bi->pagelist_dma_tag, pagelist, bi->pagelist_dma_map);
failed_alloc:
	bus_dma_tag_destroy(bi->pagelist_dma_tag);

	return err;
}

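/*
 * Undo create_pagelist(): copy any partial cache-line fragments back into
 * the buffer, return the fragment to the free list, release the held
 * pages, and free the pagelist DMA resources and the BULKINFO_T itself.
 */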
static void
free_pagelist(BULKINFO_T *bi, int actual)
{
	vm_page_t *pages;
	unsigned int num_pages, i;
	PAGELIST_T *pagelist;

	pagelist = bi->pagelist;

	vchiq_log_trace(vchiq_arm_log_level,
		"free_pagelist - %x, %d (%lu bytes @%p)", (unsigned int)pagelist, actual, pagelist->length, bi->buf);

	num_pages =
		(pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
		PAGE_SIZE;

	pages = (vm_page_t *)(pagelist->addrs + num_pages);

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) * g_fragment_size;
		int head_bytes, tail_bytes;

		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			copyout_page(pages[0],
				pagelist->offset,
				fragments,
				head_bytes);
		}

		if ((actual >= 0) && (head_bytes < actual) &&
			(tail_bytes != 0)) {
			copyout_page(pages[num_pages - 1],
				(((vm_offset_t)bi->buf + actual) % PAGE_SIZE) - tail_bytes,
				fragments + g_cache_line_size,
				tail_bytes);
		}

		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/*
	 * Release every page held in create_pagelist(); only mark pages
	 * dirty when the VideoCore may have written to them.
	 */
	for (i = 0; i < num_pages; i++) {
		if (pagelist->type != PAGELIST_WRITE)
			vm_page_dirty(pages[i]);

		pagelist_page_free(pages[i]);
	}

	bus_dmamap_unload(bi->pagelist_dma_tag, bi->pagelist_dma_map);
	bus_dmamem_free(bi->pagelist_dma_tag, bi->pagelist, bi->pagelist_dma_map);
	bus_dma_tag_destroy(bi->pagelist_dma_tag);

	free(bi, M_VCPAGELIST);
}