/linux/drivers/gpu/drm/nouveau/nvkm/core/
engine.c
    31: nvkm_engine_chsw_load(struct nvkm_engine *engine)
    33:         if (engine->func->chsw_load)
    34:                 return engine->func->chsw_load(engine);
    39: nvkm_engine_reset(struct nvkm_engine *engine)
    41:         if (engine->func->reset)
    42:                 return engine->func->reset(engine);
    44:         nvkm_subdev_fini(&engine->subdev, false);
    45:         return nvkm_subdev_init(&engine->subdev);
    51:         struct nvkm_engine *engine = *pengine;  [in nvkm_engine_unref()]
    53:         if (engine) {
    [all …]
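The engine.c hits above show a dispatch pattern used throughout nvkm: each engine carries a function table, the wrapper calls the hardware-specific hook when one is provided, and otherwise falls back to a generic path (here, fini + init standing in for reset). Below is a minimal standalone C sketch of that idiom; every name in it is hypothetical, not the nvkm API.

    #include <stdio.h>

    /* Hypothetical engine with an optional hardware-specific hook. */
    struct engine;

    struct engine_funcs {
            int (*reset)(struct engine *);  /* may be NULL */
    };

    struct engine {
            const struct engine_funcs *func;
    };

    static void generic_fini(struct engine *e) { (void)e; puts("generic fini"); }
    static int  generic_init(struct engine *e) { (void)e; puts("generic init"); return 0; }

    /* Prefer the hook; fall back to the generic fini + init sequence. */
    static int engine_reset(struct engine *e)
    {
            if (e->func->reset)
                    return e->func->reset(e);

            generic_fini(e);
            return generic_init(e);
    }

    int main(void)
    {
            struct engine_funcs no_hooks = { 0 };
            struct engine e = { .func = &no_hooks };

            return engine_reset(&e);  /* takes the fallback path */
    }
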
/linux/drivers/gpu/drm/i915/gt/
mock_engine.c
    60: static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
    75:         ring->vma = create_ring_vma(engine->gt->ggtt, PAGE_SIZE);
    93: static struct i915_request *first_request(struct mock_engine *engine)
    95:         return list_first_entry_or_null(&engine->hw_queue,
   106:         intel_engine_signal_breadcrumbs(request->engine);  [in advance()]
   111:         struct mock_engine *engine = timer_container_of(engine, t, hw_delay);  [in hw_delay_complete()]
   115:         spin_lock_irqsave(&engine->hw_lock, flags);
   118:         request = first_request(engine);
   126:         while ((request = first_request(engine))) {
   128:                 mod_timer(&engine->hw_delay,
    [all …]
selftest_engine_heartbeat.c
    14: static void reset_heartbeat(struct intel_engine_cs *engine)
    16:         intel_engine_set_heartbeat(engine,
    17:                                    engine->defaults.heartbeat_interval_ms);
    37: static int engine_sync_barrier(struct intel_engine_cs *engine)
    39:         return timeline_sync(engine->kernel_context->timeline);
    90: static int __live_idle_pulse(struct intel_engine_cs *engine,
    96:         GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
   106:         err = i915_active_acquire_preallocate_barrier(&p->active, engine);
   116:         GEM_BUG_ON(llist_empty(&engine->barrier_tasks));
   118:         err = fn(engine);
    [all …]
intel_engine_pm.c
    20: static void intel_gsc_idle_msg_enable(struct intel_engine_cs *engine)
    22:         struct drm_i915_private *i915 = engine->i915;
    24:         if (MEDIA_VER(i915) >= 13 && engine->id == GSC0) {
    25:                 intel_uncore_write(engine->gt->uncore,
    29:                 intel_uncore_write(engine->gt->uncore,
    42:         int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true);  [in dbg_poison_ce()]
    60:         struct intel_engine_cs *engine =  [in __engine_unpark()]
    61:                 container_of(wf, typeof(*engine), wakeref);
    64:         ENGINE_TRACE(engine, "\n");
    66:         engine->wakeref_track = intel_gt_pm_get(engine->gt);
    [all …]
intel_engine_pm.h
    17: intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
    19:         return intel_wakeref_is_active(&engine->wakeref);
    22: static inline void __intel_engine_pm_get(struct intel_engine_cs *engine)
    24:         __intel_wakeref_get(&engine->wakeref);
    27: static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
    29:         intel_wakeref_get(&engine->wakeref);
    32: static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
    34:         return intel_wakeref_get_if_active(&engine->wakeref);
    37: static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
    39:         if (!intel_engine_is_virtual(engine)) {
    [all …]
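intel_engine_pm.h is a thin wrapper over a wakeref, which acts like a reference count on the powered-up state: intel_engine_pm_get takes a reference (powering the engine up if needed), while intel_engine_pm_get_if_awake succeeds only when a reference is already held. Here is a rough userspace analogue with C11 atomics; the simplified semantics and names are assumptions for illustration, not i915's actual implementation (which also handles the power-up/down transitions themselves).

    #include <stdatomic.h>
    #include <stdbool.h>

    struct wakeref {
            atomic_int count;  /* >0 means the unit is awake */
    };

    /* Unconditional get: the 0 -> 1 transition would power the unit up. */
    static void wakeref_get(struct wakeref *wf)
    {
            atomic_fetch_add(&wf->count, 1);
    }

    /* Conditional get: never take the count from 0 to 1, so this only
     * succeeds while some other holder keeps the unit awake. */
    static bool wakeref_get_if_active(struct wakeref *wf)
    {
            int old = atomic_load(&wf->count);

            do {
                    if (old == 0)
                            return false;
            } while (!atomic_compare_exchange_weak(&wf->count, &old, old + 1));

            return true;
    }

    static bool wakeref_is_active(struct wakeref *wf)
    {
            return atomic_load(&wf->count) > 0;
    }
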
selftest_engine_pm.c
    76:         struct intel_engine_cs *engine = ce->engine;  [in __measure_timestamps()]
    77:         u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5);
    78:         u32 offset = i915_ggtt_offset(engine->status_page.vma);
    96:         cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4000);
    97:         cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4004);
   102:         cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4016);
   103:         cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4012);
   108:         intel_engine_flush_submission(engine);
   132:                engine->name, sema[1], sema[3], sema[0], sema[4]);
   139: static int __live_engine_timestamps(struct intel_engine_cs *engine)
    [all …]
selftest_workarounds.c
    34:         } engine[I915_NUM_ENGINES];  [struct member]
    64:         struct intel_engine_cs *engine;  [in reference_lists_init()]
    73:         for_each_engine(engine, gt, id) {
    74:                 struct i915_wa_list *wal = &lists->engine[id].wa_list;
    76:                 wa_init_start(wal, gt, "REF", engine->name);
    77:                 engine_init_workarounds(engine, wal);
    80:                 __intel_engine_init_ctx_wa(engine,
    81:                                            &lists->engine[id].ctx_wa_list,
    89:         struct intel_engine_cs *engine;  [in reference_lists_fini()]
    92:         for_each_engine(engine, gt, id)
    [all …]
selftest_ring_submission.c
     9: static struct i915_vma *create_wally(struct intel_engine_cs *engine)
    16:         obj = i915_gem_object_create_internal(engine->i915, 4096);
    20:         vma = i915_vma_instance(obj, engine->gt->vm, NULL);
    44:         if (GRAPHICS_VER(engine->i915) >= 6) {
    47:         } else if (GRAPHICS_VER(engine->i915) >= 4) {
    61:         vma->private = intel_context_create(engine); /* dummy residuals */
    89: static int new_context_sync(struct intel_engine_cs *engine)
    94:         ce = intel_context_create(engine);
   104: static int mixed_contexts_sync(struct intel_engine_cs *engine, u32 *result)
   111:         err = context_sync(engine->kernel_context);
    [all …]
selftest_mocs.c
    24: static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
    28:         ce = intel_context_create(engine);
   134:         struct intel_gt *gt = rq->engine->gt;  [in read_mocs_table()]
   143:         addr = mocs_offset(rq->engine);
   160: static int check_mocs_table(struct intel_engine_cs *engine,
   173:                        engine->name, i, **vaddr, expect);
   192: static int check_l3cc_table(struct intel_engine_cs *engine,
   205:         if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
   207:                                engine->name, i, **vaddr, expect);
   238:         if (!err && ce->engine->class == RENDER_CLASS)  [in check_mocs_engine()]
    [all …]
selftest_engine_cs.c
    44: static i915_reg_t timestamp_reg(struct intel_engine_cs *engine)
    46:         struct drm_i915_private *i915 = engine->i915;
    49:                 return RING_TIMESTAMP_UDW(engine->mmio_base);
    51:         return RING_TIMESTAMP(engine->mmio_base);
    70:         *cs++ = i915_mmio_reg_offset(timestamp_reg(rq->engine));  [in write_timestamp()]
    86:         obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE);  [in create_empty_batch()]
   136:         struct intel_engine_cs *engine;  [in perf_mi_bb_start()]
   145:         for_each_engine(engine, gt, id) {
   146:                 struct intel_context *ce = engine->kernel_context;
   151:                 if (GRAPHICS_VER(engine->i915) < 7 && engine->id != RCS0)
    [all …]
selftest_gt_pm.c
    39: static u32 read_timestamp(struct intel_engine_cs *engine)
    41:         struct drm_i915_private *i915 = engine->i915;
    44:                 ENGINE_READ_FW(engine, RING_TIMESTAMP);
    47:                 return ENGINE_READ_FW(engine, RING_TIMESTAMP_UDW);
    49:         return ENGINE_READ_FW(engine, RING_TIMESTAMP);
    52: static void measure_clocks(struct intel_engine_cs *engine,
    61:                 cycles[i] = -read_timestamp(engine);
    66:                 cycles[i] += read_timestamp(engine);
    82:         struct intel_engine_cs *engine;  [in live_gt_clocks()]
    98:         for_each_engine(engine, gt, id) {
    [all …]
gen6_engine_cs.c
    58:                 intel_gt_scratch_offset(rq->engine->gt,  [in gen6_emit_post_sync_nonzero_flush()]
    92:                 intel_gt_scratch_offset(rq->engine->gt,  [in gen6_emit_flush_rcs()]
   153:         *cs++ = intel_gt_scratch_offset(rq->engine->gt,  [in gen6_emit_breadcrumb_rcs()]
   292:                 intel_gt_scratch_offset(rq->engine->gt,  [in gen7_emit_flush_rcs()]
   377:         GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);  [in gen6_emit_breadcrumb_xcs()]
   397:         GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);  [in gen7_emit_breadcrumb_xcs()]
   425: void gen6_irq_enable(struct intel_engine_cs *engine)
   427:         ENGINE_WRITE(engine, RING_IMR,
   428:                      ~(engine->irq_enable_mask | engine->irq_keep_mask));
   431:         ENGINE_POSTING_READ(engine, RING_IMR);
    [all …]
/linux/drivers/gpu/drm/sun4i/
sunxi_engine.h
    34:         void (*atomic_begin)(struct sunxi_engine *engine,
    51:         int (*atomic_check)(struct sunxi_engine *engine,
    63:         void (*commit)(struct sunxi_engine *engine,
    81:                           struct sunxi_engine *engine);
    91:         void (*apply_color_correction)(struct sunxi_engine *engine);
   101:         void (*disable_color_correction)(struct sunxi_engine *engine);
   114:         void (*vblank_quirk)(struct sunxi_engine *engine);
   124:         void (*mode_set)(struct sunxi_engine *engine,
   154: sunxi_engine_commit(struct sunxi_engine *engine,
   158:         if (engine->ops && engine->ops->commit)
    [all …]
sun4i_crtc.c
    54:         struct sunxi_engine *engine = scrtc->engine;  [in sun4i_crtc_atomic_check()]
    57:         if (engine && engine->ops && engine->ops->atomic_check)
    58:                 ret = engine->ops->atomic_check(engine, crtc_state);
    70:         struct sunxi_engine *engine = scrtc->engine;  [in sun4i_crtc_atomic_begin()]
    82:         if (engine->ops->atomic_begin)
    83:                 engine->ops->atomic_begin(engine, old_state);
    94:         sunxi_engine_commit(scrtc->engine, crtc, state);  [in sun4i_crtc_atomic_flush()]
   149:         sunxi_engine_mode_set(scrtc->engine, mode);  [in sun4i_crtc_mode_set_nofb()]
   193:                           struct sunxi_engine *engine,  [in sun4i_crtc_init()]
   204:         scrtc->engine = engine;
    [all …]
/linux/drivers/video/fbdev/via/
accel.c
    13: static int viafb_set_bpp(void __iomem *engine, u8 bpp)
    19:         gemode = readl(engine + VIA_REG_GEMODE) & 0xfffffcfc;
    34:         writel(gemode, engine + VIA_REG_GEMODE);
    39: static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
    79:         ret = viafb_set_bpp(engine, dst_bpp);
    91:         writel(tmp, engine + 0x08);
   100:         writel(tmp, engine + 0x0C);
   108:         writel(tmp, engine + 0x10);
   111:         writel(fg_color, engine + 0x18);
   114:         writel(bg_color, engine + 0x1C);
    [all …]
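accel.c drives the VIA 2D engine entirely through memory-mapped registers: viafb_set_bpp does a read-modify-write of VIA_REG_GEMODE (readl, mask, writel), and hw_bitblt_1 then writes blit parameters at fixed offsets from the engine base. The sketch below illustrates the read-modify-write pattern over a plain volatile mapping; the offset and field layout are invented for illustration, and real kernel code would use readl()/writel() on an __iomem pointer.

    #include <stdint.h>

    #define REG_MODE 0x00u  /* hypothetical mode register offset */

    static inline uint32_t reg_read(volatile uint32_t *base, uint32_t off)
    {
            return base[off / 4];
    }

    static inline void reg_write(volatile uint32_t *base, uint32_t off,
                                 uint32_t val)
    {
            base[off / 4] = val;
    }

    /* Read-modify-write: clear a (made-up) 2-bit depth field, set a new value. */
    static void set_depth(volatile uint32_t *base, uint32_t depth_code)
    {
            uint32_t mode = reg_read(base, REG_MODE) & ~0x300u;

            reg_write(base, REG_MODE, mode | ((depth_code & 3) << 8));
    }
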
/linux/drivers/crypto/marvell/cesa/
cesa.c
    38: mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
    43:         *backlog = crypto_get_backlog(&engine->queue);
    44:         req = crypto_dequeue_request(&engine->queue);
    52: static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
    58:         spin_lock_bh(&engine->lock);
    59:         if (!engine->req) {
    60:                 req = mv_cesa_dequeue_req_locked(engine, &backlog);
    61:                 engine->req = req;
    63:         spin_unlock_bh(&engine->lock);
    75: static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
    [all …]
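mv_cesa_rearm_engine illustrates single-slot submission: the CESA engine processes one request at a time, so the rearm path takes the engine lock, dequeues the next request only when nothing is in flight, and publishes it as engine->req before unlocking. A hedged pthread sketch of the same discipline follows; the types and the launch stub are hypothetical, not the driver's API.

    #include <pthread.h>
    #include <stddef.h>

    struct request {
            struct request *next;
    };

    struct engine {
            pthread_mutex_t lock;
            struct request *req;    /* request currently on the hardware */
            struct request *queue;  /* pending requests (singly linked) */
    };

    /* Caller must hold engine->lock. */
    static struct request *dequeue_locked(struct engine *e)
    {
            struct request *req = e->queue;

            if (req)
                    e->queue = req->next;
            return req;
    }

    /* Claim the next request only if the single hardware slot is free. */
    static void rearm_engine(struct engine *e)
    {
            struct request *req = NULL;

            pthread_mutex_lock(&e->lock);
            if (!e->req)
                    e->req = req = dequeue_locked(e);
            pthread_mutex_unlock(&e->lock);

            if (req) {
                    /* start the hardware on `req` outside the lock (stub) */
            }
    }
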
/linux/drivers/gpu/drm/i915/selftests/
intel_scheduler_helpers.c
    21:         struct intel_engine_cs *engine;  [in intel_selftest_find_any_engine()]
    24:         for_each_engine(engine, gt, id)
    25:                 return engine;
    31: int intel_selftest_modify_policy(struct intel_engine_cs *engine,
    37:         saved->reset = engine->i915->params.reset;
    38:         saved->flags = engine->flags;
    39:         saved->timeslice = engine->props.timeslice_duration_ms;
    40:         saved->preempt_timeout = engine->props.preempt_timeout_ms;
    52:                 engine->i915->params.reset = 2;
    53:                 engine->flags |= I915_ENGINE_WANT_FORCED_PREEMPTION;
    [all …]
/linux/drivers/gpu/drm/nouveau/nvkm/engine/gr/
base.c
    56: nvkm_gr_chsw_load(struct nvkm_engine *engine)
    58:         struct nvkm_gr *gr = nvkm_gr(engine);
    65: nvkm_gr_tile(struct nvkm_engine *engine, int region, struct nvkm_fb_tile *tile)
    67:         struct nvkm_gr *gr = nvkm_gr(engine);
    91:         struct nvkm_gr *gr = nvkm_gr(oclass->engine);  [in nvkm_gr_oclass_get()]
   115:         struct nvkm_gr *gr = nvkm_gr(oclass->engine);  [in nvkm_gr_cclass_new()]
   122: nvkm_gr_intr(struct nvkm_engine *engine)
   124:         struct nvkm_gr *gr = nvkm_gr(engine);
   129: nvkm_gr_nonstall(struct nvkm_engine *engine)
   131:         struct nvkm_gr *gr = nvkm_gr(engine);
    [all …]
/linux/drivers/gpu/drm/nouveau/nvkm/engine/dma/
Kbuild
     2: nvkm-y += nvkm/engine/dma/base.o
     3: nvkm-y += nvkm/engine/dma/nv04.o
     4: nvkm-y += nvkm/engine/dma/nv50.o
     5: nvkm-y += nvkm/engine/dma/gf100.o
     6: nvkm-y += nvkm/engine/dma/gf119.o
     7: nvkm-y += nvkm/engine/dma/gv100.o
     9: nvkm-y += nvkm/engine/dma/user.o
    10: nvkm-y += nvkm/engine/dma/usernv04.o
    11: nvkm-y += nvkm/engine/dma/usernv50.o
    12: nvkm-y += nvkm/engine/dma/usergf100.o
    [all …]
/linux/drivers/gpu/drm/nouveau/nvkm/engine/sec2/
base.c
    40: nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend)
    42:         struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
    43:         struct nvkm_subdev *subdev = &sec2->engine.subdev;
    75: nvkm_sec2_init(struct nvkm_engine *engine)
    77:         struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
    78:         struct nvkm_subdev *subdev = &sec2->engine.subdev;
    96: nvkm_sec2_oneinit(struct nvkm_engine *engine)
    98:         struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
    99:         struct nvkm_subdev *subdev = &sec2->engine.subdev;
   100:         struct nvkm_intr *intr = &sec2->engine.subdev.device->mc->intr;
    [all …]
/linux/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/
nv31.c
    42:         int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,  [in nv31_mpeg_object_bind()]
    71:         spin_lock_irqsave(&mpeg->engine.lock, flags);  [in nv31_mpeg_chan_dtor()]
    74:         spin_unlock_irqrestore(&mpeg->engine.lock, flags);
    87:         struct nv31_mpeg *mpeg = nv31_mpeg(oclass->engine);  [in nv31_mpeg_chan_new()]
    99:         spin_lock_irqsave(&mpeg->engine.lock, flags);
   104:         spin_unlock_irqrestore(&mpeg->engine.lock, flags);
   113: nv31_mpeg_tile(struct nvkm_engine *engine, int i, struct nvkm_fb_tile *tile)
   115:         struct nv31_mpeg *mpeg = nv31_mpeg(engine);
   116:         struct nvkm_device *device = mpeg->engine.subdev.device;
   127:         struct nvkm_subdev *subdev = &mpeg->engine.subdev;  [in nv31_mpeg_mthd_dma()]
    [all …]
nv44.c
    24: #define nv44_mpeg(p) container_of((p), struct nv44_mpeg, engine)
    34:         struct nvkm_engine engine;  [struct member]
    56:         int ret = nvkm_gpuobj_new(chan->object.engine->subdev.device, 264 * 4,  [in nv44_mpeg_chan_bind()]
    73:         struct nvkm_device *device = mpeg->engine.subdev.device;  [in nv44_mpeg_chan_fini()]
    89:         spin_lock_irqsave(&mpeg->engine.lock, flags);  [in nv44_mpeg_chan_dtor()]
    91:         spin_unlock_irqrestore(&mpeg->engine.lock, flags);
   106:         struct nv44_mpeg *mpeg = nv44_mpeg(oclass->engine);  [in nv44_mpeg_chan_new()]
   117:         spin_lock_irqsave(&mpeg->engine.lock, flags);
   119:         spin_unlock_irqrestore(&mpeg->engine.lock, flags);
   142: nv44_mpeg_intr(struct nvkm_engine *engine)
    [all …]
/linux/drivers/gpu/drm/i915/gvt/
execlist.c
    56: static int to_context_switch_event(const struct intel_engine_cs *engine)
    58:         if (WARN_ON(engine->id >= ARRAY_SIZE(context_switch_events)))
    61:         return context_switch_events[engine->id];
    96:                 execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);  [in emulate_execlist_status()]
   135:                 execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_PTR);  [in emulate_csb_update()]
   137:                 execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_BUF);
   160:                            vgpu->hws_pga[execlist->engine->id]);
   166:                 hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4,
   177:                            to_context_switch_event(execlist->engine));
   259:                 execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);  [in get_next_execlist_slot()]
    [all …]
/linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/
base.c
    39: nvkm_fifo_ctxsw_in_progress(struct nvkm_engine *engine)
    44:         nvkm_runl_foreach(runl, engine->subdev.device->fifo) {
    46:                         if (engn->engine == engine)
    76:         struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);  [in nvkm_fifo_class_new()]
    96:         struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);  [in nvkm_fifo_class_get()]
   125: nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
   127:         struct nvkm_fifo *fifo = nvkm_fifo(engine);
   130:         nvkm_inth_block(&fifo->engine.subdev.inth);
   139: nvkm_fifo_init(struct nvkm_engine *engine)
   141:         struct nvkm_fifo *fifo = nvkm_fifo(engine);
    [all …]
/linux/drivers/gpu/drm/nouveau/nvkm/engine/
xtensa.c
    30:         struct nvkm_xtensa *xtensa = nvkm_xtensa(oclass->engine);  [in nvkm_xtensa_oclass_get()]
    47:         return nvkm_gpuobj_new(object->engine->subdev.device, 0x10000, align,  [in nvkm_xtensa_cclass_bind()]
    57: nvkm_xtensa_intr(struct nvkm_engine *engine)
    59:         struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
    60:         struct nvkm_subdev *subdev = &xtensa->engine.subdev;
    79: nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend)
    81:         struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
    82:         struct nvkm_device *device = xtensa->engine.subdev.device;
    94: nvkm_xtensa_init(struct nvkm_engine *engine)
    96:         struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
    [all …]