Lines Matching +full:memory +full:- +full:mapping
1 // SPDX-License-Identifier: MIT
25 * The DRM_NOUVEAU_VM_BIND ioctl provides clients an interface to manage the
26 * userspace-manageable portion of the VA space. It provides operations to map
27 * and unmap memory. Mappings may be flagged as sparse. Sparse mappings are not
28 * backed by a GEM object and the kernel will ignore GEM handles provided
29 * alongside a sparse mapping.
31 * Userspace may request memory backed mappings either within or outside of the
32 * bounds (but not crossing those bounds) of a previously mapped sparse
33 * mapping. Subsequently requested memory backed mappings within a sparse
34 * mapping will take precedence over the corresponding range of the sparse
35 * mapping. If such memory backed mappings are unmapped the kernel will make
36 * sure that the corresponding sparse mapping will take their place again.
37 * Requests to unmap a sparse mapping that still contains memory backed mappings
38 * will result in those memory backed mappings being unmapped first.
40 * Unmap requests are not bound to the range of existing mappings and can even
41 * overlap the bounds of sparse mappings. For such a request the kernel will
42 * make sure to unmap all memory backed mappings within the given range,
43 * splitting up memory backed mappings which are only partially contained
44 * within the given range. Unmap requests with the sparse flag set must match
45 * the range of a previously mapped sparse mapping exactly.
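
Unmap operations follow the same op pattern; a sketch under the same uAPI assumptions as above:

/* An unmap over an arbitrary range tears down, and where needed splits,
 * any memory backed mappings it overlaps. With the sparse flag set it
 * must instead match a sparse mapping's range exactly. */
struct drm_nouveau_vm_bind_op unmap_ops[2] = {
	{
		/* Unmap the memory backed 64 KiB; the sparse mapping
		 * underneath takes its place again. */
		.op    = DRM_NOUVEAU_VM_BIND_OP_UNMAP,
		.addr  = 0x100000000ull,
		.range = 1ull << 16,
	},
	{
		/* Exact-match removal of the whole 1 GiB sparse mapping. */
		.op    = DRM_NOUVEAU_VM_BIND_OP_UNMAP,
		.flags = DRM_NOUVEAU_VM_BIND_SPARSE,
		.addr  = 0x100000000ull,
		.range = 1ull << 30,
	},
};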
47 * While the kernel generally permits arbitrary sequences and ranges of memory
48 * backed map and unmap operations either within the bounds of a given sparse
49 * mapping or within the bounds of the userspace-manageable area of the VA
50 * space, the kernel does not permit userspace to:
52 * - unmap non-existent sparse mappings
53 * - unmap a sparse mapping and map a new sparse mapping overlapping the range
54 * of the previously unmapped sparse mapping within the same VM_BIND ioctl
55 * - unmap a sparse mapping and map new memory backed mappings overlapping the
56 * range of the previously unmapped sparse mapping within the same VM_BIND
57 * ioctl
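
The last two restrictions mean that replacing a sparse mapping with an overlapping one is a two step process; a hedged sketch, same assumptions as above:

/* Not permitted in one ioctl: unmap a sparse mapping and map a new,
 * overlapping (sparse or memory backed) mapping. Submit two VM_BIND
 * ioctls back to back instead, one per op below. */
struct drm_nouveau_vm_bind_op unmap_old = {
	.op    = DRM_NOUVEAU_VM_BIND_OP_UNMAP,
	.flags = DRM_NOUVEAU_VM_BIND_SPARSE,
	.addr  = 0x100000000ull,
	.range = 1ull << 30,
};
struct drm_nouveau_vm_bind_op map_new = {
	.op    = DRM_NOUVEAU_VM_BIND_OP_MAP,
	.flags = DRM_NOUVEAU_VM_BIND_SPARSE,
	.addr  = 0x100000000ull,	/* overlaps the old range */
	.range = 1ull << 31,
};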
59 * When using the VM_BIND ioctl to request the kernel to map memory to a given
60 * virtual address in the GPU's VA space there is no guarantee that the actual
61 * mappings are created in the GPU's MMU. If the given memory is swapped out
62 * at the time the bind operation is executed the kernel will stash the mapping
63 * details into its internal allocator and create the actual MMU mappings once
64 * the memory is swapped back in. While this is transparent for userspace, it is
65 * guaranteed that all the backing memory is swapped back in and all the memory
66 * mappings, as requested by userspace previously, are actually mapped once the
67 * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
82 * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
83 * an up to date view of the VA space. However, the actual mappings might still
84 * be pending. Hence, EXEC jobs must have the fences of the corresponding
85 * VM_BIND jobs they depend on attached to them.
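
For the fence requirement just described, userspace typically routes the dependency through a syncobj: the asynchronous VM_BIND job signals it and the EXEC job waits on it. A sketch building on the assumptions above; syncobj creation (DRM_IOCTL_SYNCOBJ_CREATE) and error handling are elided, and the flag and field names should be checked against include/uapi/drm/nouveau_drm.h.

static int bind_then_exec(int drm_fd, uint32_t channel, uint32_t syncobj,
			  struct drm_nouveau_vm_bind_op *ops, uint32_t op_count,
			  struct drm_nouveau_exec_push *push, uint32_t push_count)
{
	struct drm_nouveau_sync sync = {
		.flags  = DRM_NOUVEAU_SYNC_SYNCOBJ,
		.handle = syncobj,
	};
	/* Asynchronous bind job: signals the syncobj once it has finished. */
	struct drm_nouveau_vm_bind bind = {
		.flags     = DRM_NOUVEAU_VM_BIND_RUN_ASYNC,
		.op_count  = op_count,
		.op_ptr    = (uint64_t)(uintptr_t)ops,
		.sig_count = 1,
		.sig_ptr   = (uint64_t)(uintptr_t)&sync,
	};
	/* The EXEC job waits for the bind job's fence before it runs. */
	struct drm_nouveau_exec exec = {
		.channel    = channel,
		.push_count = push_count,
		.push_ptr   = (uint64_t)(uintptr_t)push,
		.wait_count = 1,
		.wait_ptr   = (uint64_t)(uintptr_t)&sync,
	};
	int ret;

	ret = ioctl(drm_fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);
	if (ret)
		return ret;

	return ioctl(drm_fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);
}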
93 struct nouveau_cli *cli = job->cli; in nouveau_exec_job_submit()
98 ret = nouveau_fence_create(&exec_job->fence, exec_job->chan); in nouveau_exec_job_submit()
123 drm_gpuvm_exec_resv_add_fence(vme, job->done_fence, in nouveau_exec_job_armed_submit()
124 job->resv_usage, job->resv_usage); in nouveau_exec_job_armed_submit()
132 struct nouveau_channel *chan = exec_job->chan; in nouveau_exec_job_run()
133 struct nouveau_fence *fence = exec_job->fence; in nouveau_exec_job_run()
136 ret = nvif_chan_gpfifo_wait(&chan->chan, exec_job->push.count + 1, 16); in nouveau_exec_job_run()
138 NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret); in nouveau_exec_job_run()
142 for (i = 0; i < exec_job->push.count; i++) { in nouveau_exec_job_run()
143 struct drm_nouveau_exec_push *p = &exec_job->push.s[i]; in nouveau_exec_job_run()
144 bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH; in nouveau_exec_job_run()
146 nvif_chan_gpfifo_push(&chan->chan, p->va, p->va_len, no_prefetch); in nouveau_exec_job_run()
149 nvif_chan_gpfifo_post(&chan->chan); in nouveau_exec_job_run()
153 nouveau_fence_unref(&exec_job->fence); in nouveau_exec_job_run()
154 NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret); in nouveau_exec_job_run()
162 exec_job->fence = NULL; in nouveau_exec_job_run()
164 return &fence->base; in nouveau_exec_job_run()
175 kfree(exec_job->fence); in nouveau_exec_job_free()
176 kfree(exec_job->push.s); in nouveau_exec_job_free()
184 struct nouveau_channel *chan = exec_job->chan; in nouveau_exec_job_timeout()
186 if (unlikely(!atomic_read(&chan->killed))) in nouveau_exec_job_timeout()
189 NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n", in nouveau_exec_job_timeout()
190 chan->chid); in nouveau_exec_job_timeout()
211 for (i = 0; i < __args->push.count; i++) { in nouveau_exec_job_init()
212 struct drm_nouveau_exec_push *p = &__args->push.s[i]; in nouveau_exec_job_init()
214 if (unlikely(p->va_len > NV50_DMA_PUSH_MAX_LENGTH)) { in nouveau_exec_job_init()
215 NV_PRINTK(err, nouveau_cli(__args->file_priv), in nouveau_exec_job_init()
217 p->va_len, NV50_DMA_PUSH_MAX_LENGTH); in nouveau_exec_job_init()
218 return -EINVAL; in nouveau_exec_job_init()
224 return -ENOMEM; in nouveau_exec_job_init()
226 job->push.count = __args->push.count; in nouveau_exec_job_init()
227 if (__args->push.count) { in nouveau_exec_job_init()
228 job->push.s = kmemdup(__args->push.s, in nouveau_exec_job_init()
229 sizeof(*__args->push.s) * in nouveau_exec_job_init()
230 __args->push.count, in nouveau_exec_job_init()
232 if (!job->push.s) { in nouveau_exec_job_init()
233 ret = -ENOMEM; in nouveau_exec_job_init()
238 args.file_priv = __args->file_priv; in nouveau_exec_job_init()
239 job->chan = __args->chan; in nouveau_exec_job_init()
241 args.sched = __args->sched; in nouveau_exec_job_init()
243 args.credits = job->push.count + 1; in nouveau_exec_job_init()
245 args.in_sync.count = __args->in_sync.count; in nouveau_exec_job_init()
246 args.in_sync.s = __args->in_sync.s; in nouveau_exec_job_init()
248 args.out_sync.count = __args->out_sync.count; in nouveau_exec_job_init()
249 args.out_sync.s = __args->out_sync.s; in nouveau_exec_job_init()
254 ret = nouveau_job_init(&job->base, &args); in nouveau_exec_job_init()
261 kfree(job->push.s); in nouveau_exec_job_init()
279 ret = nouveau_job_submit(&job->base); in nouveau_exec()
286 nouveau_job_fini(&job->base); in nouveau_exec()
295 u32 inc = req->wait_count; in nouveau_exec_ucopy()
296 u64 ins = req->wait_ptr; in nouveau_exec_ucopy()
297 u32 outc = req->sig_count; in nouveau_exec_ucopy()
298 u64 outs = req->sig_ptr; in nouveau_exec_ucopy()
299 u32 pushc = req->push_count; in nouveau_exec_ucopy()
300 u64 pushs = req->push_ptr; in nouveau_exec_ucopy()
304 args->push.count = pushc; in nouveau_exec_ucopy()
305 args->push.s = u_memcpya(pushs, pushc, sizeof(*args->push.s)); in nouveau_exec_ucopy()
306 if (IS_ERR(args->push.s)) in nouveau_exec_ucopy()
307 return PTR_ERR(args->push.s); in nouveau_exec_ucopy()
311 s = &args->in_sync.s; in nouveau_exec_ucopy()
313 args->in_sync.count = inc; in nouveau_exec_ucopy()
322 s = &args->out_sync.s; in nouveau_exec_ucopy()
324 args->out_sync.count = outc; in nouveau_exec_ucopy()
335 u_free(args->push.s); in nouveau_exec_ucopy()
337 u_free(args->in_sync.s); in nouveau_exec_ucopy()
344 u_free(args->push.s); in nouveau_exec_ufree()
345 u_free(args->in_sync.s); in nouveau_exec_ufree()
346 u_free(args->out_sync.s); in nouveau_exec_ufree()
363 return -ENOMEM; in nouveau_exec_ioctl_exec()
367 return nouveau_abi16_put(abi16, -ENOSYS); in nouveau_exec_ioctl_exec()
369 list_for_each_entry(chan16, &abi16->channels, head) { in nouveau_exec_ioctl_exec()
370 if (chan16->chan->chid == req->channel) { in nouveau_exec_ioctl_exec()
371 chan = chan16->chan; in nouveau_exec_ioctl_exec()
377 return nouveau_abi16_put(abi16, -ENOENT); in nouveau_exec_ioctl_exec()
379 if (unlikely(atomic_read(&chan->killed))) in nouveau_exec_ioctl_exec()
380 return nouveau_abi16_put(abi16, -ENODEV); in nouveau_exec_ioctl_exec()
382 if (chan->user.oclass < NV50_CHANNEL_GPFIFO) in nouveau_exec_ioctl_exec()
383 return nouveau_abi16_put(abi16, -ENOSYS); in nouveau_exec_ioctl_exec()
385 push_max = nouveau_exec_push_max_from_ib_max(chan->chan.gpfifo.max); in nouveau_exec_ioctl_exec()
386 if (unlikely(req->push_count > push_max)) { in nouveau_exec_ioctl_exec()
388 req->push_count, push_max); in nouveau_exec_ioctl_exec()
389 return nouveau_abi16_put(abi16, -EINVAL); in nouveau_exec_ioctl_exec()
396 args.sched = chan16->sched; in nouveau_exec_ioctl_exec()