Lines Matching defs:vm (ALSA ctxfi driver, ctvmem.c)

26  * Find or create vm block based on requested @size.
30 get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
36 if (size > vm->size) {
42 guard(mutex)(&vm->lock);
43 list_for_each(pos, &vm->unused) {
48 if (pos == &vm->unused)
52 /* Move the vm node from unused list to used list directly */
53 list_move(&entry->list, &vm->used);
54 vm->size -= size;
64 list_add(&block->list, &vm->used);
67 vm->size -= size;
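
The matches above fall in get_vm_block(): under vm->lock (line 42), a first-fit scan of the unused list either moves an exactly-sized block straight to the used list (lines 52-54) or carves a new block out of a larger one, debiting vm->size either way (lines 54, 67). A minimal userspace sketch of that policy under simplified assumptions; struct span, first_fit, and every other name here are illustrative, not driver identifiers:

#include <stdio.h>
#include <stdlib.h>

struct span {
	unsigned int addr, size;
	struct span *next;
};

/* First fit: take the first free block that can hold @size. */
static struct span *first_fit(struct span **unused, unsigned int size)
{
	struct span **pp, *s, *hit;

	for (pp = unused; (s = *pp) != NULL; pp = &s->next) {
		if (s->size < size)
			continue;
		if (s->size == size) {		/* exact fit: unlink it whole */
			*pp = s->next;
			return s;
		}
		hit = malloc(sizeof(*hit));	/* larger: carve off the front */
		if (!hit)
			return NULL;
		hit->addr = s->addr;
		hit->size = size;
		hit->next = NULL;
		s->addr += size;		/* the remainder stays free */
		s->size -= size;
		return hit;
	}
	return NULL;				/* nothing big enough */
}

int main(void)
{
	struct span *unused = malloc(sizeof(*unused));
	struct span *blk;

	if (!unused)
		return 1;
	unused->addr = 0;
	unused->size = 4096;
	unused->next = NULL;

	blk = first_fit(&unused, 1024);
	if (blk)
		printf("got [%u,+%u), %u bytes still free\n",
		       blk->addr, blk->size, unused->size);
	free(blk);
	free(unused);
	return 0;
}
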
72 static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
79 guard(mutex)(&vm->lock);
81 vm->size += block->size;
83 list_for_each(pos, &vm->unused) {
88 if (pos == &vm->unused) {
89 list_add_tail(&block->list, &vm->unused);
104 while (pre != &vm->unused) {
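
put_vm_block() is the inverse: it credits the block's size back to vm->size (line 81), re-inserts the block into the address-ordered unused list (lines 83-89), and the loop at line 104 merges blocks that have become adjacent. The same merge logic in the span model above, assuming every node is heap-allocated:

/* Free a block back into an address-sorted list, merging neighbours. */
static void put_span(struct span **unused, struct span *blk)
{
	struct span **pp, *next, *prev = NULL;

	/* find the insertion point that keeps the list sorted by addr */
	for (pp = unused; (next = *pp) != NULL && next->addr < blk->addr;
	     pp = &next->next)
		prev = next;

	if (next && blk->addr + blk->size == next->addr) {
		blk->size += next->size;	/* absorb the following block */
		blk->next = next->next;
		free(next);
	} else {
		blk->next = next;
	}
	*pp = blk;

	if (prev && prev->addr + prev->size == blk->addr) {
		prev->size += blk->size;	/* fold into the preceding block */
		prev->next = blk->next;
		free(blk);
	}
}
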
120 ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
128 block = get_vm_block(vm, size, atc);
135 ptp = (unsigned long *)vm->ptp[0].area;
148 static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
151 put_vm_block(vm, block);
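
ct_vm_map() reserves a block with get_vm_block() (line 128) and then fills one page-table entry per page in the flat table at vm->ptp[0].area (line 135); ct_vm_unmap() only has to hand the block back (line 151). A sketch of the PTE fill step; PAGE_SHIFT_DEMO and the host_pages array stand in for the driver's CT_PAGE_SHIFT and its per-page host-address lookup:

#define PAGE_SHIFT_DEMO 12	/* stand-in for the driver's page shift */

/* Write one PTE per page of the reserved device-address block. */
static void fill_ptes(unsigned long *ptp, unsigned int block_addr,
		      unsigned int block_size, const unsigned long *host_pages)
{
	unsigned int i;
	unsigned int first = block_addr >> PAGE_SHIFT_DEMO;	/* first PTE index */
	unsigned int pages = block_size >> PAGE_SHIFT_DEMO;	/* pages to map */

	for (i = 0; i < pages; i++)
		ptp[first + i] = host_pages[i];	/* device page -> host page */
}
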
160 ct_get_ptp_phys(struct ct_vm *vm, int index)
162 return (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
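
ct_get_ptp_phys() (line 162) returns ~0UL for any out-of-range index, so a caller can walk the page-table pages without knowing CT_PTP_NUM and stop at the first sentinel. A standalone model of that pattern, with made-up demo values only:

#include <stdio.h>

#define PTP_NUM_DEMO 4		/* stand-in for CT_PTP_NUM */

static unsigned long ptp_addr_demo[PTP_NUM_DEMO] = {
	0x10000, 0x11000, 0x12000, 0x13000,	/* illustrative addresses */
};

/* Same shape as ct_get_ptp_phys(): ~0UL means "past the end". */
static unsigned long get_ptp_phys_demo(int index)
{
	return (index >= PTP_NUM_DEMO) ? ~0UL : ptp_addr_demo[index];
}

int main(void)
{
	unsigned long phys;
	int i;

	for (i = 0; (phys = get_ptp_phys_demo(i)) != ~0UL; i++)
		printf("PTP page %d at 0x%lx\n", i, phys);
	return 0;
}
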
167 struct ct_vm *vm;
173 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
174 if (!vm)
177 mutex_init(&vm->lock);
183 PAGE_SIZE, &vm->ptp[i]);
189 ct_vm_destroy(vm);
192 vm->size = CT_ADDRS_PER_PAGE * i;
193 vm->map = ct_vm_map;
194 vm->unmap = ct_vm_unmap;
195 vm->get_ptp_phys = ct_get_ptp_phys;
196 INIT_LIST_HEAD(&vm->unused);
197 INIT_LIST_HEAD(&vm->used);
201 block->size = vm->size;
202 list_add(&block->list, &vm->unused);
205 *rvm = vm;
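
ct_vm_create() allocates the vm and initializes its mutex (lines 173-177), allocates the page-table pages (line 183), sizes the managed space by how many of them succeeded (line 192, vm->size = CT_ADDRS_PER_PAGE * i), wires up the map/unmap/get_ptp_phys ops (lines 193-195), and seeds the unused list with one block covering the whole space (lines 201-202). In the span model, reusing struct span and PAGE_SHIFT_DEMO from above, with ptes_per_page as an illustrative stand-in for the per-page PTE count:

/* Seed the allocator: the whole space starts as one free block. */
static struct span *vm_create(unsigned int ptp_pages, unsigned int ptes_per_page)
{
	struct span *whole = malloc(sizeof(*whole));

	if (!whole)
		return NULL;
	whole->addr = 0;
	/* each page-table page maps ptes_per_page pages of address space */
	whole->size = (ptp_pages * ptes_per_page) << PAGE_SHIFT_DEMO;
	whole->next = NULL;
	return whole;	/* this single node is the initial unused list */
}
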
211 void ct_vm_destroy(struct ct_vm *vm)
218 while (!list_empty(&vm->used)) {
219 pos = vm->used.next;
224 while (!list_empty(&vm->unused)) {
225 pos = vm->unused.next;
233 snd_dma_free_pages(&vm->ptp[i]);
235 vm->size = 0;
237 kfree(vm);
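
ct_vm_destroy() unwinds everything: it drains the used and unused lists node by node (lines 218-225), frees the page-table pages (line 233), zeroes vm->size (line 235), and frees the vm itself (line 237). The list drain in the span model:

/* Pop and free every node until the list is empty. */
static void drain(struct span **list)
{
	struct span *s;

	while ((s = *list) != NULL) {
		*list = s->next;
		free(s);
	}
}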