Lines Matching +full:vm +full:- +full:map
1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * NOTE(review): elided search listing -- only matching lines are shown.
 * Local declarations, returns and closing braces between the numbered
 * lines are not visible here; comments below describe only what the
 * visible lines establish.
 */
26 * Find or create vm block based on requested @size.
30 get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
/* A request larger than the total space tracked by @vm cannot succeed. */
36 if (size > vm->size) {
37 dev_err(atc->card->dev,
/* Scope-bound mutex: released automatically on every exit path. */
42 guard(mutex)(&vm->lock);
/* First-fit scan of the free ("unused") list for an entry >= @size. */
43 list_for_each(pos, &vm->unused) {
45 if (entry->size >= size)
/* Scan wrapped back to the list head: no free entry was large enough. */
48 if (pos == &vm->unused)
/* Exact fit: move the free-list node itself onto the used list. */
51 if (entry->size == size) {
52 /* Move the vm node from unused list to used list directly */
53 list_move(&entry->list, &vm->used);
54 vm->size -= size;
/*
 * Partial fit: carve @size bytes off the front of the free entry
 * (block is presumably a freshly allocated node -- allocation elided).
 */
62 block->addr = entry->addr;
63 block->size = size;
64 list_add(&block->list, &vm->used);
65 entry->addr += size;
66 entry->size -= size;
/* vm->size tracks remaining free space, shrunk on either path. */
67 vm->size -= size;
/*
 * Return @block to @vm's free list, coalescing with adjacent free
 * entries where possible. (Elided listing: control flow between the
 * numbered lines is not visible.)
 */
72 static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
/* Round the block back up to whole device pages before freeing. */
77 block->size = CT_PAGE_ALIGN(block->size);
79 guard(mutex)(&vm->lock);
/* Unlink from the used list and credit the space back to the pool. */
80 list_del(&block->list);
81 vm->size += block->size;
/* Find the first free entry that starts at or after the block's end. */
83 list_for_each(pos, &vm->unused) {
85 if (entry->addr >= (block->addr + block->size))
/* No successor found: the block belongs at the tail of the free list. */
88 if (pos == &vm->unused) {
89 list_add_tail(&block->list, &vm->unused);
/* Block abuts the following free entry: merge forward into it. */
92 if ((block->addr + block->size) == entry->addr) {
93 entry->addr = block->addr;
94 entry->size += block->size;
/* Not adjacent: insert before @pos to keep the list address-ordered. */
97 __list_add(&block->list, pos->prev, pos);
/*
 * Walk back toward the list head and merge the (possibly grown)
 * entry with its predecessor when contiguous -- the break/overlap
 * handling between these lines is elided; confirm against full file.
 */
102 pos = &entry->list;
103 pre = pos->prev;
104 while (pre != &vm->unused) {
107 if ((pre_ent->addr + pre_ent->size) > entry->addr)
110 pre_ent->size += entry->size;
114 pre = pos->prev;
118 /* Map host addr (kmalloced/vmalloced) to device logical addr. */
120 ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
/* Reserve a device-address range large enough for the buffer. */
128 block = get_vm_block(vm, size, atc);
/* Failure path: log via the card device (message text elided). */
130 dev_err(atc->card->dev,
/* Locate the page-table entries covered by the block's device pages. */
135 ptp = (unsigned long *)vm->ptp[0].area;
136 pte_start = (block->addr >> CT_PAGE_SHIFT);
137 pages = block->size >> CT_PAGE_SHIFT;
/*
 * Record the caller-requested size; presumably the allocated range
 * was page-aligned earlier -- confirm against the elided lines.
 */
144 block->size = size;
/* Unmap @block: return its device-address range to @vm's free pool. */
148 static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
151 put_vm_block(vm, block);
155 * return the host physical addr of the @index-th device
160 ct_get_ptp_phys(struct ct_vm *vm, int index)
/* ~0UL is the out-of-range sentinel for an invalid @index. */
162 return (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
/*
 * NOTE(review): function signature elided from this listing; from the
 * *rvm out-parameter and -ENOMEM returns this is presumably the ct_vm
 * constructor -- confirm against the full file.
 */
167 struct ct_vm *vm;
/* Zero-initialized so list heads/pointers start in a known state. */
173 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
174 if (!vm)
175 return -ENOMEM;
177 mutex_init(&vm->lock);
/* DMA page allocation per page-table page (loop header elided). */
182 &pci->dev,
183 PAGE_SIZE, &vm->ptp[i]);
/* Allocation failure: tear down whatever was built so far. */
189 ct_vm_destroy(vm);
190 return -ENOMEM;
/* Manageable device address space scales with the PTP pages obtained. */
192 vm->size = CT_ADDRS_PER_PAGE * i;
/* Publish the ops through function pointers on the vm object. */
193 vm->map = ct_vm_map;
194 vm->unmap = ct_vm_unmap;
195 vm->get_ptp_phys = ct_get_ptp_phys;
196 INIT_LIST_HEAD(&vm->unused);
197 INIT_LIST_HEAD(&vm->used);
/* Seed the free list with one block spanning the whole address space. */
200 block->addr = 0;
201 block->size = vm->size;
202 list_add(&block->list, &vm->unused);
/* Hand the constructed object back to the caller. */
205 *rvm = vm;
/*
 * Tear down @vm: drain both block lists, release the DMA-backed
 * page-table pages, then free the vm object itself. (Elided listing:
 * the per-node frees inside the loops are not visible.)
 */
211 void ct_vm_destroy(struct ct_vm *vm)
/* Free every block node still on the used list... */
218 while (!list_empty(&vm->used)) {
219 pos = vm->used.next;
/* ...and every node on the unused (free) list. */
224 while (!list_empty(&vm->unused)) {
225 pos = vm->unused.next;
/* Release the DMA pages backing the page tables (loop header elided). */
233 snd_dma_free_pages(&vm->ptp[i]);
235 vm->size = 0;
237 kfree(vm);