Lines matching refs: man. Identifier cross-references for 'man' in drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c: each entry gives the source line number, the line itself, and the enclosing function; 'member', 'argument', and 'local' mark how 'man' is declared at that line.
154 struct vmw_cmdbuf_man *man; member
196 static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
198 static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
206 static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible) in vmw_cmdbuf_cur_lock() argument
209 if (mutex_lock_interruptible(&man->cur_mutex)) in vmw_cmdbuf_cur_lock()
212 mutex_lock(&man->cur_mutex); in vmw_cmdbuf_cur_lock()
223 static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_cur_unlock() argument
225 mutex_unlock(&man->cur_mutex); in vmw_cmdbuf_cur_unlock()
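
The cur_mutex helpers at lines 206-225 are the standard optionally-interruptible lock-taker pattern; reconstructed from the fragments above (a sketch, with only the fields shown in the listing assumed):

    /* Take cur_mutex; in the interruptible case a pending signal
     * aborts the wait and the error is propagated to the caller.
     */
    static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man,
                                   bool interruptible)
    {
            if (interruptible) {
                    if (mutex_lock_interruptible(&man->cur_mutex))
                            return -ERESTARTSYS;
            } else {
                    mutex_lock(&man->cur_mutex);
            }
            return 0;
    }
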
244 dma_pool_free(header->man->dheaders, dheader, header->handle); in vmw_cmdbuf_header_inline_free()
258 struct vmw_cmdbuf_man *man = header->man; in __vmw_cmdbuf_header_free() local
260 lockdep_assert_held_once(&man->lock); in __vmw_cmdbuf_header_free()
268 wake_up_all(&man->alloc_queue); in __vmw_cmdbuf_header_free()
270 dma_pool_free(man->headers, header->cb_header, in __vmw_cmdbuf_header_free()
283 struct vmw_cmdbuf_man *man = header->man; in vmw_cmdbuf_header_free() local
290 spin_lock(&man->lock); in vmw_cmdbuf_header_free()
292 spin_unlock(&man->lock); in vmw_cmdbuf_header_free()
303 struct vmw_cmdbuf_man *man = header->man; in vmw_cmdbuf_header_submit() local
307 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val); in vmw_cmdbuf_header_submit()
311 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val); in vmw_cmdbuf_header_submit()
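
Lines 303-311 form the actual submission doorbell: the 64-bit DMA handle of the header is split across two 32-bit register writes, high half first. A sketch of the body (the upper_32_bits/lower_32_bits split and folding the context id into the low word are assumptions; exact masking is elided):

    u32 val;

    /* Program the high half of the header's DMA address first ... */
    val = upper_32_bits(header->handle);
    vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

    /* ... then the low half plus the context id; this second write
     * is what actually kicks the device. */
    val = lower_32_bits(header->handle) | header->cb_context;
    vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
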
339 static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_ctx_submit() argument
342 while (ctx->num_hw_submitted < man->max_hw_submitted && in vmw_cmdbuf_ctx_submit()
377 static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_ctx_process() argument
383 vmw_cmdbuf_ctx_submit(man, ctx); in vmw_cmdbuf_ctx_process()
392 wake_up_all(&man->idle_queue); in vmw_cmdbuf_ctx_process()
401 list_add_tail(&entry->list, &man->error); in vmw_cmdbuf_ctx_process()
402 schedule_work(&man->work); in vmw_cmdbuf_ctx_process()
419 vmw_cmdbuf_ctx_submit(man, ctx); in vmw_cmdbuf_ctx_process()
434 static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_man_process() argument
442 for_each_cmdbuf_ctx(man, i, ctx) in vmw_cmdbuf_man_process()
443 vmw_cmdbuf_ctx_process(man, ctx, &notempty); in vmw_cmdbuf_man_process()
445 if (man->irq_on && !notempty) { in vmw_cmdbuf_man_process()
446 vmw_generic_waiter_remove(man->dev_priv, in vmw_cmdbuf_man_process()
448 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_man_process()
449 man->irq_on = false; in vmw_cmdbuf_man_process()
450 } else if (!man->irq_on && notempty) { in vmw_cmdbuf_man_process()
451 vmw_generic_waiter_add(man->dev_priv, in vmw_cmdbuf_man_process()
453 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_man_process()
454 man->irq_on = true; in vmw_cmdbuf_man_process()
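
Lines 445-454 keep the command-buffer completion interrupt enabled only while work is outstanding. The waiter-flag argument is truncated out of the listing at lines 447 and 452; SVGA_IRQFLAG_COMMAND_BUFFER, which appears at line 833, is assumed here:

    /* Edge-triggered IRQ bookkeeping, called with man->lock held:
     * drop the waiter once every context has drained, add it when
     * new work shows up.
     */
    if (man->irq_on && !notempty) {
            vmw_generic_waiter_remove(man->dev_priv,
                                      SVGA_IRQFLAG_COMMAND_BUFFER,
                                      &man->dev_priv->cmdbuf_waiters);
            man->irq_on = false;
    } else if (!man->irq_on && notempty) {
            vmw_generic_waiter_add(man->dev_priv,
                                   SVGA_IRQFLAG_COMMAND_BUFFER,
                                   &man->dev_priv->cmdbuf_waiters);
            man->irq_on = true;
            /* The real function then reprocesses the contexts in
             * case a completion was missed while the IRQ was off. */
    }
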
474 static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_ctx_add() argument
481 list_add_tail(&header->list, &man->ctx[cb_context].submitted); in vmw_cmdbuf_ctx_add()
483 vmw_cmdbuf_man_process(man); in vmw_cmdbuf_ctx_add()
496 void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_irqthread() argument
498 spin_lock(&man->lock); in vmw_cmdbuf_irqthread()
499 vmw_cmdbuf_man_process(man); in vmw_cmdbuf_irqthread()
500 spin_unlock(&man->lock); in vmw_cmdbuf_irqthread()
514 struct vmw_cmdbuf_man *man = in vmw_cmdbuf_work_func() local
524 for_each_cmdbuf_ctx(man, i, ctx) in vmw_cmdbuf_work_func()
527 mutex_lock(&man->error_mutex); in vmw_cmdbuf_work_func()
528 spin_lock(&man->lock); in vmw_cmdbuf_work_func()
529 list_for_each_entry_safe(entry, next, &man->error, list) { in vmw_cmdbuf_work_func()
563 if (man->using_mob) in vmw_cmdbuf_work_func()
576 for_each_cmdbuf_ctx(man, i, ctx) in vmw_cmdbuf_work_func()
577 man->ctx[i].block_submission = true; in vmw_cmdbuf_work_func()
579 spin_unlock(&man->lock); in vmw_cmdbuf_work_func()
582 if (global_block && vmw_cmdbuf_preempt(man, 0)) in vmw_cmdbuf_work_func()
585 spin_lock(&man->lock); in vmw_cmdbuf_work_func()
586 for_each_cmdbuf_ctx(man, i, ctx) { in vmw_cmdbuf_work_func()
588 vmw_cmdbuf_ctx_process(man, ctx, &dummy); in vmw_cmdbuf_work_func()
605 vmw_cmdbuf_man_process(man); in vmw_cmdbuf_work_func()
606 spin_unlock(&man->lock); in vmw_cmdbuf_work_func()
608 if (global_block && vmw_cmdbuf_startstop(man, 0, true)) in vmw_cmdbuf_work_func()
613 vmw_cmd_send_fence(man->dev_priv, &dummy); in vmw_cmdbuf_work_func()
614 wake_up_all(&man->idle_queue); in vmw_cmdbuf_work_func()
617 mutex_unlock(&man->error_mutex); in vmw_cmdbuf_work_func()
627 static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_man_idle() argument
634 spin_lock(&man->lock); in vmw_cmdbuf_man_idle()
635 vmw_cmdbuf_man_process(man); in vmw_cmdbuf_man_idle()
636 for_each_cmdbuf_ctx(man, i, ctx) { in vmw_cmdbuf_man_idle()
643 idle = list_empty(&man->error); in vmw_cmdbuf_man_idle()
646 spin_unlock(&man->lock); in vmw_cmdbuf_man_idle()
660 static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man) in __vmw_cmdbuf_cur_flush() argument
662 struct vmw_cmdbuf_header *cur = man->cur; in __vmw_cmdbuf_cur_flush()
664 lockdep_assert_held_once(&man->cur_mutex); in __vmw_cmdbuf_cur_flush()
669 spin_lock(&man->lock); in __vmw_cmdbuf_cur_flush()
670 if (man->cur_pos == 0) { in __vmw_cmdbuf_cur_flush()
675 man->cur->cb_header->length = man->cur_pos; in __vmw_cmdbuf_cur_flush()
676 vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0); in __vmw_cmdbuf_cur_flush()
678 spin_unlock(&man->lock); in __vmw_cmdbuf_cur_flush()
679 man->cur = NULL; in __vmw_cmdbuf_cur_flush()
680 man->cur_pos = 0; in __vmw_cmdbuf_cur_flush()
693 int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_cur_flush() argument
696 int ret = vmw_cmdbuf_cur_lock(man, interruptible); in vmw_cmdbuf_cur_flush()
701 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_cur_flush()
702 vmw_cmdbuf_cur_unlock(man); in vmw_cmdbuf_cur_flush()
718 int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible, in vmw_cmdbuf_idle() argument
723 ret = vmw_cmdbuf_cur_flush(man, interruptible); in vmw_cmdbuf_idle()
724 vmw_generic_waiter_add(man->dev_priv, in vmw_cmdbuf_idle()
726 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_idle()
730 (man->idle_queue, vmw_cmdbuf_man_idle(man, true), in vmw_cmdbuf_idle()
734 (man->idle_queue, vmw_cmdbuf_man_idle(man, true), in vmw_cmdbuf_idle()
737 vmw_generic_waiter_remove(man->dev_priv, in vmw_cmdbuf_idle()
739 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_idle()
741 if (!vmw_cmdbuf_man_idle(man, true)) in vmw_cmdbuf_idle()
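
The near-identical fragments at lines 730 and 734 are not a duplicate: they are the continuation lines of the two wait branches in vmw_cmdbuf_idle(), interruptible and not. Reconstructed (a sketch; 'timeout' is the function's jiffies argument):

    if (interruptible)
            ret = wait_event_interruptible_timeout
                    (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
                     timeout);
    else
            ret = wait_event_timeout
                    (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
                     timeout);

A zero return from either wait means the timeout expired, which leads to the !vmw_cmdbuf_man_idle() recheck at line 741.
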
762 static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_try_alloc() argument
771 spin_lock(&man->lock); in vmw_cmdbuf_try_alloc()
772 ret = drm_mm_insert_node(&man->mm, info->node, info->page_size); in vmw_cmdbuf_try_alloc()
774 vmw_cmdbuf_man_process(man); in vmw_cmdbuf_try_alloc()
775 ret = drm_mm_insert_node(&man->mm, info->node, info->page_size); in vmw_cmdbuf_try_alloc()
778 spin_unlock(&man->lock); in vmw_cmdbuf_try_alloc()
796 static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_alloc_space() argument
812 if (mutex_lock_interruptible(&man->space_mutex)) in vmw_cmdbuf_alloc_space()
815 mutex_lock(&man->space_mutex); in vmw_cmdbuf_alloc_space()
819 if (vmw_cmdbuf_try_alloc(man, &info)) in vmw_cmdbuf_alloc_space()
822 vmw_generic_waiter_add(man->dev_priv, in vmw_cmdbuf_alloc_space()
824 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_alloc_space()
830 (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info)); in vmw_cmdbuf_alloc_space()
833 (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER, in vmw_cmdbuf_alloc_space()
834 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_alloc_space()
835 mutex_unlock(&man->space_mutex); in vmw_cmdbuf_alloc_space()
839 wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info)); in vmw_cmdbuf_alloc_space()
841 vmw_generic_waiter_remove(man->dev_priv, in vmw_cmdbuf_alloc_space()
843 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_alloc_space()
846 mutex_unlock(&man->space_mutex); in vmw_cmdbuf_alloc_space()
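
vmw_cmdbuf_alloc_space() (lines 796-846) retries allocation from the drm_mm range, sleeping as an IRQ waiter when the pool is full. A condensed sketch of that logic (the signal-error unwind at line 835 is folded in; exact ordering is an assumption):

    int ret = 0;

    /* Fast path: grab a node from the managed range straight away. */
    if (vmw_cmdbuf_try_alloc(man, &info))
            goto out_unlock;

    /* Slow path: register as an IRQ waiter and sleep until freed
     * space makes the predicate succeed. */
    vmw_generic_waiter_add(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
                           &man->dev_priv->cmdbuf_waiters);
    if (interruptible)
            ret = wait_event_interruptible(man->alloc_queue,
                                           vmw_cmdbuf_try_alloc(man, &info));
    else
            wait_event(man->alloc_queue,
                       vmw_cmdbuf_try_alloc(man, &info));
    vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
                              &man->dev_priv->cmdbuf_waiters);
    out_unlock:
            mutex_unlock(&man->space_mutex);
            return ret;
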
860 static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_space_pool() argument
869 if (!man->has_pool) in vmw_cmdbuf_space_pool()
872 ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible); in vmw_cmdbuf_space_pool()
877 header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL, in vmw_cmdbuf_space_pool()
887 header->cmd = man->map + offset; in vmw_cmdbuf_space_pool()
888 if (man->using_mob) { in vmw_cmdbuf_space_pool()
890 cb_hdr->ptr.mob.mobid = man->cmd_space->tbo.resource->start; in vmw_cmdbuf_space_pool()
893 cb_hdr->ptr.pa = (u64)man->handle + (u64)offset; in vmw_cmdbuf_space_pool()
899 spin_lock(&man->lock); in vmw_cmdbuf_space_pool()
901 spin_unlock(&man->lock); in vmw_cmdbuf_space_pool()
914 static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_space_inline() argument
924 dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL, in vmw_cmdbuf_space_inline()
955 void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_alloc() argument
969 ret = vmw_cmdbuf_space_inline(man, header, size); in vmw_cmdbuf_alloc()
971 ret = vmw_cmdbuf_space_pool(man, header, size, interruptible); in vmw_cmdbuf_alloc()
978 header->man = man; in vmw_cmdbuf_alloc()
998 static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_reserve_cur() argument
1006 if (vmw_cmdbuf_cur_lock(man, interruptible)) in vmw_cmdbuf_reserve_cur()
1009 cur = man->cur; in vmw_cmdbuf_reserve_cur()
1010 if (cur && (size + man->cur_pos > cur->size || in vmw_cmdbuf_reserve_cur()
1013 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_reserve_cur()
1015 if (!man->cur) { in vmw_cmdbuf_reserve_cur()
1016 ret = vmw_cmdbuf_alloc(man, in vmw_cmdbuf_reserve_cur()
1017 max_t(size_t, size, man->default_size), in vmw_cmdbuf_reserve_cur()
1018 interruptible, &man->cur); in vmw_cmdbuf_reserve_cur()
1020 vmw_cmdbuf_cur_unlock(man); in vmw_cmdbuf_reserve_cur()
1024 cur = man->cur; in vmw_cmdbuf_reserve_cur()
1034 return (void *) (man->cur->cmd + man->cur_pos); in vmw_cmdbuf_reserve_cur()
1044 static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_commit_cur() argument
1047 struct vmw_cmdbuf_header *cur = man->cur; in vmw_cmdbuf_commit_cur()
1049 lockdep_assert_held_once(&man->cur_mutex); in vmw_cmdbuf_commit_cur()
1052 man->cur_pos += size; in vmw_cmdbuf_commit_cur()
1056 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_commit_cur()
1057 vmw_cmdbuf_cur_unlock(man); in vmw_cmdbuf_commit_cur()
1073 void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size, in vmw_cmdbuf_reserve() argument
1078 return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible); in vmw_cmdbuf_reserve()
1101 void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size, in vmw_cmdbuf_commit() argument
1105 vmw_cmdbuf_commit_cur(man, size, flush); in vmw_cmdbuf_commit()
1109 (void) vmw_cmdbuf_cur_lock(man, false); in vmw_cmdbuf_commit()
1110 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_commit()
1112 man->cur = header; in vmw_cmdbuf_commit()
1113 man->cur_pos = size; in vmw_cmdbuf_commit()
1117 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_commit()
1118 vmw_cmdbuf_cur_unlock(man); in vmw_cmdbuf_commit()
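
vmw_cmdbuf_reserve() and vmw_cmdbuf_commit() (lines 1073-1118) are the public pair wrapping the cur-buffer helpers above. A hypothetical caller, assuming the signatures implied by the listing (the cmd struct and the SVGA_CB_CONTEXT_0 context are illustrative):

    /* Reserve space in the current command buffer, fill it in,
     * then commit; a NULL header selects the shared cur buffer. */
    cmd = vmw_cmdbuf_reserve(man, sizeof(*cmd), SVGA_CB_CONTEXT_0,
                             true /* interruptible */, NULL);
    if (IS_ERR_OR_NULL(cmd))
            return cmd ? PTR_ERR(cmd) : -ENOMEM;
    /* ... write the device command into *cmd ... */
    vmw_cmdbuf_commit(man, sizeof(*cmd), NULL, false /* no flush */);
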
1131 static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_send_device_command() argument
1137 void *cmd = vmw_cmdbuf_alloc(man, size, false, &header); in vmw_cmdbuf_send_device_command()
1145 spin_lock(&man->lock); in vmw_cmdbuf_send_device_command()
1147 spin_unlock(&man->lock); in vmw_cmdbuf_send_device_command()
1168 static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context) in vmw_cmdbuf_preempt() argument
1179 return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd)); in vmw_cmdbuf_preempt()
1193 static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context, in vmw_cmdbuf_startstop() argument
1205 return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd)); in vmw_cmdbuf_startstop()
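
Both vmw_cmdbuf_preempt() and vmw_cmdbuf_startstop() build a small device-control command on the stack and push it through vmw_cmdbuf_send_device_command() (lines 1131-1147), which submits an inline header synchronously under man->lock. A sketch of the startstop body; the SVGA_DC_CMD_START_STOP_CONTEXT id and SVGADCCmdStartStop layout are assumed device-header names, not confirmed by the listing:

    struct {
            uint32 id;
            SVGADCCmdStartStop body;          /* assumed device struct */
    } __packed cmd;

    cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT; /* assumed command id */
    cmd.body.enable = enable ? 1 : 0;
    cmd.body.context = SVGA_CB_CONTEXT_0 + context;

    return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
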
1220 int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size) in vmw_cmdbuf_set_pool_size() argument
1222 struct vmw_private *dev_priv = man->dev_priv; in vmw_cmdbuf_set_pool_size()
1225 if (man->has_pool) in vmw_cmdbuf_set_pool_size()
1230 man->map = dma_alloc_coherent(dev_priv->drm.dev, size, in vmw_cmdbuf_set_pool_size()
1231 &man->handle, GFP_KERNEL); in vmw_cmdbuf_set_pool_size()
1232 if (man->map) { in vmw_cmdbuf_set_pool_size()
1233 man->using_mob = false; in vmw_cmdbuf_set_pool_size()
1252 ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space); in vmw_cmdbuf_set_pool_size()
1256 man->map = vmw_bo_map_and_cache(man->cmd_space); in vmw_cmdbuf_set_pool_size()
1257 man->using_mob = man->map; in vmw_cmdbuf_set_pool_size()
1260 man->size = size; in vmw_cmdbuf_set_pool_size()
1261 drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT); in vmw_cmdbuf_set_pool_size()
1263 man->has_pool = true; in vmw_cmdbuf_set_pool_size()
1271 man->default_size = VMW_CMDBUF_INLINE_SIZE; in vmw_cmdbuf_set_pool_size()
1274 (man->using_mob) ? "MOB" : "DMA"); in vmw_cmdbuf_set_pool_size()
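
vmw_cmdbuf_set_pool_size() (lines 1220-1274) prefers coherent DMA memory and falls back to a MOB-backed buffer object; man->using_mob then steers the matching teardown at lines 1375-1379. The shape of the decision, condensed from the fragments (bo_params setup and capability checks elided):

    /* Prefer a coherent DMA pool for the command buffer space. */
    man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
                                  &man->handle, GFP_KERNEL);
    if (man->map) {
            man->using_mob = false;
    } else {
            /* Fall back to a MOB-backed, kernel-mapped buffer object. */
            ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space);
            if (ret)
                    return ret;
            man->map = vmw_bo_map_and_cache(man->cmd_space);
            man->using_mob = man->map;  /* non-NULL map: MOB in use */
    }
    man->size = size;
    drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
    man->has_pool = true;
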
1291 struct vmw_cmdbuf_man *man; in vmw_cmdbuf_man_create() local
1299 man = kzalloc(sizeof(*man), GFP_KERNEL); in vmw_cmdbuf_man_create()
1300 if (!man) in vmw_cmdbuf_man_create()
1303 man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ? in vmw_cmdbuf_man_create()
1305 man->headers = dma_pool_create("vmwgfx cmdbuf", in vmw_cmdbuf_man_create()
1309 if (!man->headers) { in vmw_cmdbuf_man_create()
1314 man->dheaders = dma_pool_create("vmwgfx inline cmdbuf", in vmw_cmdbuf_man_create()
1318 if (!man->dheaders) { in vmw_cmdbuf_man_create()
1323 for_each_cmdbuf_ctx(man, i, ctx) in vmw_cmdbuf_man_create()
1326 INIT_LIST_HEAD(&man->error); in vmw_cmdbuf_man_create()
1327 spin_lock_init(&man->lock); in vmw_cmdbuf_man_create()
1328 mutex_init(&man->cur_mutex); in vmw_cmdbuf_man_create()
1329 mutex_init(&man->space_mutex); in vmw_cmdbuf_man_create()
1330 mutex_init(&man->error_mutex); in vmw_cmdbuf_man_create()
1331 man->default_size = VMW_CMDBUF_INLINE_SIZE; in vmw_cmdbuf_man_create()
1332 init_waitqueue_head(&man->alloc_queue); in vmw_cmdbuf_man_create()
1333 init_waitqueue_head(&man->idle_queue); in vmw_cmdbuf_man_create()
1334 man->dev_priv = dev_priv; in vmw_cmdbuf_man_create()
1335 man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1; in vmw_cmdbuf_man_create()
1336 INIT_WORK(&man->work, &vmw_cmdbuf_work_func); in vmw_cmdbuf_man_create()
1339 ret = vmw_cmdbuf_startstop(man, 0, true); in vmw_cmdbuf_man_create()
1342 vmw_cmdbuf_man_destroy(man); in vmw_cmdbuf_man_create()
1346 return man; in vmw_cmdbuf_man_create()
1349 dma_pool_destroy(man->headers); in vmw_cmdbuf_man_create()
1351 kfree(man); in vmw_cmdbuf_man_create()
1367 void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_remove_pool() argument
1369 if (!man->has_pool) in vmw_cmdbuf_remove_pool()
1372 man->has_pool = false; in vmw_cmdbuf_remove_pool()
1373 man->default_size = VMW_CMDBUF_INLINE_SIZE; in vmw_cmdbuf_remove_pool()
1374 (void) vmw_cmdbuf_idle(man, false, 10*HZ); in vmw_cmdbuf_remove_pool()
1375 if (man->using_mob) in vmw_cmdbuf_remove_pool()
1376 vmw_bo_unreference(&man->cmd_space); in vmw_cmdbuf_remove_pool()
1378 dma_free_coherent(man->dev_priv->drm.dev, in vmw_cmdbuf_remove_pool()
1379 man->size, man->map, man->handle); in vmw_cmdbuf_remove_pool()
1389 void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_man_destroy() argument
1391 WARN_ON_ONCE(man->has_pool); in vmw_cmdbuf_man_destroy()
1392 (void) vmw_cmdbuf_idle(man, false, 10*HZ); in vmw_cmdbuf_man_destroy()
1394 if (vmw_cmdbuf_startstop(man, 0, false)) in vmw_cmdbuf_man_destroy()
1397 vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR, in vmw_cmdbuf_man_destroy()
1398 &man->dev_priv->error_waiters); in vmw_cmdbuf_man_destroy()
1399 (void) cancel_work_sync(&man->work); in vmw_cmdbuf_man_destroy()
1400 dma_pool_destroy(man->dheaders); in vmw_cmdbuf_man_destroy()
1401 dma_pool_destroy(man->headers); in vmw_cmdbuf_man_destroy()
1402 mutex_destroy(&man->cur_mutex); in vmw_cmdbuf_man_destroy()
1403 mutex_destroy(&man->space_mutex); in vmw_cmdbuf_man_destroy()
1404 mutex_destroy(&man->error_mutex); in vmw_cmdbuf_man_destroy()
1405 kfree(man); in vmw_cmdbuf_man_destroy()