/linux/drivers/gpu/drm/i915/gt/
intel_engine_heartbeat.c:
    17:  * While the engine is active, we send a periodic pulse along the engine
    19:  * is stuck, and we fail to preempt it, we declare the engine hung and
    23: static bool next_heartbeat(struct intel_engine_cs *engine)
    28: delay = READ_ONCE(engine->props.heartbeat_interval_ms);
    30: rq = engine->heartbeat.systole;
    42: delay == engine->defaults.heartbeat_interval_ms) {
    50: longer = READ_ONCE(engine->props.preempt_timeout_ms) * 2;
    51: longer = intel_clamp_heartbeat_interval_ms(engine, longer);
    62: mod_delayed_work(system_highpri_wq, &engine->heartbeat.work, delay + 1);
    79: static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
    [all …]

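The excerpt above shows the shape of i915's heartbeat: a delayed work item emits a pulse request along the engine, stretches its interval while a previous pulse (the "systole") is still in flight, and rearms itself. A minimal sketch of that self-rearming pattern, using only stock workqueue APIs — the struct and function names here are hypothetical, not the driver's:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_engine {
	struct delayed_work heartbeat;
	unsigned long interval_ms;	/* 0 disables the heartbeat */
};

static void my_heartbeat(struct work_struct *work)
{
	struct my_engine *e =
		container_of(work, struct my_engine, heartbeat.work);
	unsigned long delay = READ_ONCE(e->interval_ms);

	if (!delay)			/* heartbeat switched off */
		return;

	/*
	 * ... emit a pulse request; if the previous pulse is still
	 * stuck, escalate its priority or declare the engine hung ...
	 */

	/* rearm; the extra jiffy mirrors next_heartbeat()'s slack */
	mod_delayed_work(system_highpri_wq, &e->heartbeat,
			 msecs_to_jiffies(delay) + 1);
}
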
mock_engine.c:
    60: static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
    75: ring->vma = create_ring_vma(engine->gt->ggtt, PAGE_SIZE);
    93: static struct i915_request *first_request(struct mock_engine *engine)
    95: return list_first_entry_or_null(&engine->hw_queue,
    106: intel_engine_signal_breadcrumbs(request->engine);
    111: struct mock_engine *engine = timer_container_of(engine, t, hw_delay);
    115: spin_lock_irqsave(&engine->hw_lock, flags);
    118: request = first_request(engine);
    126: while ((request = first_request(engine))) {
    128: mod_timer(&engine->hw_delay,
    [all …]

selftest_engine_heartbeat.c:
    14: static void reset_heartbeat(struct intel_engine_cs *engine)
    16: intel_engine_set_heartbeat(engine,
    17: engine->defaults.heartbeat_interval_ms);
    37: static int engine_sync_barrier(struct intel_engine_cs *engine)
    39: return timeline_sync(engine->kernel_context->timeline);
    90: static int __live_idle_pulse(struct intel_engine_cs *engine,
    96: GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
    106: err = i915_active_acquire_preallocate_barrier(&p->active, engine);
    116: GEM_BUG_ON(llist_empty(&engine->barrier_tasks));
    118: err = fn(engine);
    [all …]

intel_engine_pm.h:
    17: intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
    19: return intel_wakeref_is_active(&engine->wakeref);
    22: static inline void __intel_engine_pm_get(struct intel_engine_cs *engine)
    24: __intel_wakeref_get(&engine->wakeref);
    27: static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
    29: intel_wakeref_get(&engine->wakeref);
    32: static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
    34: return intel_wakeref_get_if_active(&engine->wakeref);
    37: static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
    39: if (!intel_engine_is_virtual(engine)) {
    [all …]

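These are thin inline wrappers over the engine's wakeref. The usual pattern is to bracket any work that needs the engine powered with a get/put pair, or to use the _if_awake variant when the caller must not wake a parked engine. A hedged usage sketch — the surrounding functions are invented for illustration, and intel_engine_pm_put() is assumed to be the matching release from the same header:

static void poke_engine(struct intel_engine_cs *engine)
{
	/* hold a wakeref so the engine cannot be parked under us */
	intel_engine_pm_get(engine);

	/* ... touch state that is only valid while unparked ... */

	intel_engine_pm_put(engine);
}

static bool poke_engine_if_busy(struct intel_engine_cs *engine)
{
	/* opportunistic: only proceed if already awake, never wake it */
	if (!intel_engine_pm_get_if_awake(engine))
		return false;

	/* ... */

	intel_engine_pm_put(engine);
	return true;
}
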
intel_engine_user.c:
    39: void intel_engine_add_user(struct intel_engine_cs *engine)
    41: llist_add(&engine->uabi_llist, &engine->i915->uabi_engines_llist);
    87: struct intel_engine_cs *engine =
    88: container_of(pos, typeof(*engine), uabi_llist);
    89: list_add(&engine->uabi_list, engines);
    97: u8 engine;
    106: struct intel_engine_cs *engine;
    111: for_each_uabi_engine(engine, i915) { /* all engines must agree! */
    114: if (engine->sched_engine->schedule)
    121: if (intel_uc_uses_guc_submission(&engine->gt->uc))
    [all …]

intel_engine_pm.c:
    20: static void intel_gsc_idle_msg_enable(struct intel_engine_cs *engine)
    22: struct drm_i915_private *i915 = engine->i915;
    24: if (MEDIA_VER(i915) >= 13 && engine->id == GSC0) {
    25: intel_uncore_write(engine->gt->uncore,
    29: intel_uncore_write(engine->gt->uncore,
    42: int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true);
    60: struct intel_engine_cs *engine =
    61: container_of(wf, typeof(*engine), wakeref);
    64: ENGINE_TRACE(engine, "\n");
    66: engine->wakeref_track = intel_gt_pm_get(engine->gt);
    [all …]

intel_execlists_submission.c:
    24:  * shouldn't we just need a set of those per engine command streamer? This is
    26:  * rings, the engine cs shifts to a new "ring buffer" with every context
    41:  * Now that ringbuffers belong per-context (and not per-engine, like before)
    42:  * and that contexts are uniquely tied to a given engine (and not reusable,
    45:  * - One ringbuffer per-engine inside each context.
    46:  * - One backing object per-engine inside each context.
    50:  * more complex, because we don't know at creation time which engine is going
    55:  * gets populated for a given engine once we receive an execbuffer. If later
    57:  * engine, we allocate/populate a new ringbuffer and context backing object and
    74:  * for the appropriate engine: this structure contains a copy of the context's
    [all …]

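The comment block describes the execlists ownership model: ring buffers and context backing objects moved from per-engine to per-context, allocated lazily on the first execbuffer that targets a given engine. A rough data-layout sketch of that relationship, with illustrative types only (not the driver's actual definitions):

#include <linux/types.h>

#define NUM_ENGINES 8			/* arbitrary for the sketch */

struct example_ring {
	void *vaddr;			/* CPU view of the ring pages */
	u32 head, tail;			/* consume vs. emit offsets */
};

struct example_context {
	/*
	 * One ring buffer and one backing object per engine, inside
	 * each context; slots stay NULL until the first execbuffer
	 * aimed at that engine populates them.
	 */
	struct example_ring *ring[NUM_ENGINES];
	struct example_state *state[NUM_ENGINES];
};
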
intel_engine.h:
    47:  * ENGINE_READ(engine, REG_FOO);
    52:  * ENGINE_READ_IDX(engine, REG_BAR, i)
    130: intel_read_status_page(const struct intel_engine_cs *engine, int reg)
    133: return READ_ONCE(engine->status_page.addr[reg]);
    137: intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
    144: drm_clflush_virt_range(&engine->status_page.addr[reg], sizeof(value));
    145: WRITE_ONCE(engine->status_page.addr[reg], value);
    146: drm_clflush_virt_range(&engine->status_page.addr[reg], sizeof(value));
    184: void intel_engine_stop(struct intel_engine_cs *engine);
    185: void intel_engine_cleanup(struct intel_engine_cs *engine);
    [all …]

selftest_context.c:
    75: static int __live_context_size(struct intel_engine_cs *engine)
    82: ce = intel_context_create(engine);
    91: intel_gt_coherent_map_type(engine->gt,
    112: vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
    127: rq = intel_engine_create_kernel_request(engine);
    137: pr_err("%s context overwrote trailing red-zone!", engine->name);
    151: struct intel_engine_cs *engine;
    160: for_each_engine(engine, gt, id) {
    163: if (!engine->context_size)
    166: intel_engine_pm_get(engine);
    [all …]

selftest_engine_pm.c:
    76: struct intel_engine_cs *engine = ce->engine;
    77: u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5);
    78: u32 offset = i915_ggtt_offset(engine->status_page.vma);
    96: cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4000);
    97: cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4004);
    102: cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4016);
    103: cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4012);
    108: intel_engine_flush_submission(engine);
    132: engine->name, sema[1], sema[3], sema[0], sema[4]);
    139: static int __live_engine_timestamps(struct intel_engine_cs *engine)
    [all …]

selftest_workarounds.c:
    34: } engine[I915_NUM_ENGINES];
    64: struct intel_engine_cs *engine;
    73: for_each_engine(engine, gt, id) {
    74: struct i915_wa_list *wal = &lists->engine[id].wa_list;
    76: wa_init_start(wal, gt, "REF", engine->name);
    77: engine_init_workarounds(engine, wal);
    80: __intel_engine_init_ctx_wa(engine,
    81: &lists->engine[id].ctx_wa_list,
    89: struct intel_engine_cs *engine;
    92: for_each_engine(engine, gt, id)
    [all …]

sysfs_engines.c:
    16: struct intel_engine_cs *engine;
    21: return container_of(kobj, struct kobj_engine, base)->engine;
    81: __caps_show(struct intel_engine_cs *engine,
    88: switch (engine->class) {
    123: struct intel_engine_cs *engine = kobj_to_engine(kobj);
    125: return __caps_show(engine, engine->uabi_capabilities, buf, true);
    144: struct intel_engine_cs *engine = kobj_to_engine(kobj);
    169: clamped = intel_clamp_max_busywait_duration_ns(engine, duration);
    173: WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
    181: struct intel_engine_cs *engine = kobj_to_engine(kobj);
    [all …]

selftest_execlists.c:
    24: #define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
    42: static int wait_for_submit(struct intel_engine_cs *engine,
    47: tasklet_hi_schedule(&engine->sched_engine->tasklet);
    57: intel_engine_flush_submission(engine);
    58: if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
    68: static int wait_for_reset(struct intel_engine_cs *engine,
    76: intel_engine_flush_submission(engine);
    78: if (READ_ONCE(engine->execlists.pending[0]))
    90: engine->name,
    100: engine->name,
    [all …]

selftest_ring_submission.c:
    9: static struct i915_vma *create_wally(struct intel_engine_cs *engine)
    16: obj = i915_gem_object_create_internal(engine->i915, 4096);
    20: vma = i915_vma_instance(obj, engine->gt->vm, NULL);
    44: if (GRAPHICS_VER(engine->i915) >= 6) {
    47: } else if (GRAPHICS_VER(engine->i915) >= 4) {
    61: vma->private = intel_context_create(engine); /* dummy residuals */
    89: static int new_context_sync(struct intel_engine_cs *engine)
    94: ce = intel_context_create(engine);
    104: static int mixed_contexts_sync(struct intel_engine_cs *engine, u32 *result)
    111: err = context_sync(engine->kernel_context);
    [all …]

selftest_mocs.c:
    24: static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
    28: ce = intel_context_create(engine);
    134: struct intel_gt *gt = rq->engine->gt;
    143: addr = mocs_offset(rq->engine);
    160: static int check_mocs_table(struct intel_engine_cs *engine,
    173: engine->name, i, **vaddr, expect);
    192: static int check_l3cc_table(struct intel_engine_cs *engine,
    205: if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
    207: engine->name, i, **vaddr, expect);
    238: if (!err && ce->engine->class == RENDER_CLASS)
    [all …]

intel_gt_requests.c:
    29: static bool engine_active(const struct intel_engine_cs *engine)
    31: return !list_empty(&engine->kernel_context->timeline->requests);
    36: struct intel_engine_cs *engine;
    46: for_each_engine(engine, gt, id) {
    47: intel_engine_flush_submission(engine);
    50: flush_work(&engine->retire_work);
    51: flush_delayed_work(&engine->wakeref.work);
    54: active |= engine_active(engine);
    62: struct intel_engine_cs *engine =
    63: container_of(work, typeof(*engine), retire_work);
    [all …]

/linux/drivers/gpu/drm/nouveau/nvkm/core/
engine.c:
    24: #include <core/engine.h>
    31: nvkm_engine_chsw_load(struct nvkm_engine *engine)
    33: if (engine->func->chsw_load)
    34: return engine->func->chsw_load(engine);
    39: nvkm_engine_reset(struct nvkm_engine *engine)
    41: if (engine->func->reset)
    42: return engine->func->reset(engine);
    44: nvkm_subdev_fini(&engine->subdev, false);
    45: return nvkm_subdev_init(&engine->subdev);
    51: struct nvkm_engine *engine = *pengine;
    [all …]

/linux/drivers/gpu/drm/sun4i/
sunxi_engine.h:
    27:  * This callback allows to prepare our engine for an atomic
    34: void (*atomic_begin)(struct sunxi_engine *engine,
    51: int (*atomic_check)(struct sunxi_engine *engine,
    63: void (*commit)(struct sunxi_engine *engine,
    71:  * the layers supported by that engine.
    81: struct sunxi_engine *engine);
    87:  * engine. This is useful only for the composite output.
    91: void (*apply_color_correction)(struct sunxi_engine *engine);
    97:  * engine. This is useful only for the composite output.
    101: void (*disable_color_correction)(struct sunxi_engine *engine);
    [all …]

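sunxi_engine_ops is a backend vtable: a display-engine implementation fills in only the hooks it supports, and the core invokes them during the atomic commit sequence. A sketch of wiring it up — only the two callbacks whose complete signatures appear in the excerpt are spelled out, and the my_* names are hypothetical:

static void my_apply_color_correction(struct sunxi_engine *engine)
{
	/* program the CSC registers used for the composite output */
}

static void my_disable_color_correction(struct sunxi_engine *engine)
{
	/* return the CSC block to pass-through */
}

static const struct sunxi_engine_ops my_engine_ops = {
	.apply_color_correction   = my_apply_color_correction,
	.disable_color_correction = my_disable_color_correction,
	/* .atomic_begin, .atomic_check, .commit, .layers_init, ... */
};
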
/linux/drivers/video/fbdev/via/
accel.c:
    13: static int viafb_set_bpp(void __iomem *engine, u8 bpp)
    19: gemode = readl(engine + VIA_REG_GEMODE) & 0xfffffcfc;
    34: writel(gemode, engine + VIA_REG_GEMODE);
    39: static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
    79: ret = viafb_set_bpp(engine, dst_bpp);
    91: writel(tmp, engine + 0x08);
    100: writel(tmp, engine + 0x0C);
    108: writel(tmp, engine + 0x10);
    111: writel(fg_color, engine + 0x18);
    114: writel(bg_color, engine + 0x1C);
    [all …]

/linux/Documentation/devicetree/bindings/display/
allwinner,sun4i-a10-display-engine.yaml:
    4: $id: http://devicetree.org/schemas/display/allwinner,sun4i-a10-display-engine.yaml#
    7: title: Allwinner A10 Display Engine Pipeline
    14: The display engine pipeline (and its entry point, since it can be
    52: - allwinner,sun4i-a10-display-engine
    53: - allwinner,sun5i-a10s-display-engine
    54: - allwinner,sun5i-a13-display-engine
    55: - allwinner,sun6i-a31-display-engine
    56: - allwinner,sun6i-a31s-display-engine
    57: - allwinner,sun7i-a20-display-engine
    58: - allwinner,sun8i-a23-display-engine
    [all …]

/linux/drivers/gpu/drm/i915/selftests/
intel_scheduler_helpers.c:
    21: struct intel_engine_cs *engine;
    24: for_each_engine(engine, gt, id)
    25: return engine;
    27: pr_err("No valid engine found!\n");
    31: int intel_selftest_modify_policy(struct intel_engine_cs *engine,
    37: saved->reset = engine->i915->params.reset;
    38: saved->flags = engine->flags;
    39: saved->timeslice = engine->props.timeslice_duration_ms;
    40: saved->preempt_timeout = engine->props.preempt_timeout_ms;
    46:  * together with engine reset on pre-emption timeout.
    [all …]

/linux/drivers/gpu/drm/xe/
xe_hw_engine_types.h:
    13: /* See "Engine ID Definition" struct in the Icelake PRM */
    69:  * struct xe_hw_engine_class_intf - per hw engine class struct interface
    71:  * Contains all the hw engine properties per engine class.
    82: /** @sched_props.set_job_timeout: Set job timeout in ms for engine */
    84: /** @sched_props.job_timeout_min: Min job timeout in ms for engine */
    86: /** @sched_props.job_timeout_max: Max job timeout in ms for engine */
    104:  * struct xe_hw_engine - Hardware engine
    106:  * Contains all the hardware engine state for physical instances.
    109: /** @gt: GT structure this hw engine belongs to */
    111: /** @name: name of this hw engine */
    [all …]

/linux/drivers/dma/
Kconfig:
    3: # DMA engine configuration
    7: bool "DMA Engine support"
    18: bool "DMA Engine debugging"
    22: say N here. This enables DMA engine core and driver debugging.
    25: bool "DMA Engine verbose debugging"
    30: the DMA engine core and drivers.
    61: tristate "Altera / Intel mSGDMA Engine"
    74: provide DMA engine support. This includes the original ARM
    131: tristate "Broadcom SBA RAID engine support"
    140: Enable support for Broadcom SBA RAID Engine. The SBA RAID
    [all …]

/linux/drivers/mtd/nand/
ecc.c:
    3:  * Generic Error-Correcting Code (ECC) engine
    10:  * This file describes the abstraction of any NAND ECC engine. It has been
    13:  * There are three main situations where instantiating this ECC engine makes
    15:  * - external: The ECC engine is outside the NAND pipeline, typically this
    16:  *   is a software ECC engine, or an hardware engine that is
    18:  * - pipelined: The ECC engine is inside the NAND pipeline, ie. on the
    23:  * - ondie: The ECC engine is inside the NAND pipeline, on the chip's side.
    28:  * - prepare: Prepare an I/O request. Enable/disable the ECC engine based on
    30:  *   engine, this step may involve to derive the ECC bytes and place
    35:  *   hardware specific stuff to do, like shutting down the engine to
    [all …]

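All three placements (external, pipelined, on-die) funnel through the two-step contract the comment names: prepare the I/O request before it reaches the chip, finish it afterwards. A minimal sketch of an external (software-style) engine under that contract, assuming the nand_ecc_engine_ops layout from include/linux/mtd/nand.h; the my_* bodies are placeholders:

#include <linux/mtd/nand.h>

static int my_prepare_io_req(struct nand_device *nand,
			     struct nand_page_io_req *req)
{
	/* on writes, derive the ECC bytes and stage them in the OOB area */
	if (req->type == NAND_PAGE_WRITE) {
		/* ... compute and place ECC bytes ... */
	}
	return 0;
}

static int my_finish_io_req(struct nand_device *nand,
			    struct nand_page_io_req *req)
{
	/* on reads, correct the data; report bitflips or -EBADMSG */
	if (req->type == NAND_PAGE_READ) {
		/* ... run correction ... */
	}
	return 0;
}

static const struct nand_ecc_engine_ops my_ecc_ops = {
	.prepare_io_req	= my_prepare_io_req,
	.finish_io_req	= my_finish_io_req,
};
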
/linux/include/crypto/
engine.h:
    3:  * Crypto engine API
    21:  * struct crypto_engine_op - crypto hardware engine operations
    25: int (*do_one_request)(struct crypto_engine *engine,
    54: int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
    56: int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
    58: int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
    60: int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
    62: int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
    64: void crypto_finalize_aead_request(struct crypto_engine *engine,
    66: void crypto_finalize_akcipher_request(struct crypto_engine *engine,
    [all …]

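The transfer/finalize pairs above define the round trip for an engine-backed driver: the algorithm entry point only queues the request, the engine thread feeds it to do_one_request(), and the driver's completion path finalizes it so the engine can dispatch the next one. A hedged sketch for a skcipher — my_dev, my_dev_from_req(), and my_hw_start() are hypothetical driver plumbing; the crypto_engine calls are the ones declared in this header:

#include <crypto/engine.h>
#include <crypto/skcipher.h>

struct my_dev {
	struct crypto_engine *engine;
	/* ... hardware state ... */
};

struct my_dev *my_dev_from_req(struct skcipher_request *req);	/* hypothetical */
void my_hw_start(struct skcipher_request *req);			/* hypothetical */

/* crypto API entry point: just queue the request on the engine */
static int my_encrypt(struct skcipher_request *req)
{
	struct my_dev *dev = my_dev_from_req(req);

	return crypto_transfer_skcipher_request_to_engine(dev->engine, req);
}

/* invoked by the engine thread, one request at a time */
static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);

	my_hw_start(req);	/* kick the hardware; IRQ signals completion */
	return 0;		/* completion happens asynchronously */
}

/* completion path (IRQ handler or tasklet) */
static void my_complete(struct my_dev *dev, struct skcipher_request *req,
			int err)
{
	/* releases the engine to run the next queued request */
	crypto_finalize_skcipher_request(dev->engine, req, err);
}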