xref: /linux/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c (revision 906fd46a65383cd639e5eec72a047efc33045d86)
// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2020, Intel Corporation. All rights reserved.
 */

#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"

#include "i915_trace.h"

#include "intel_pxp.h"
#include "intel_pxp_cmd.h"
#include "intel_pxp_session.h"
#include "intel_pxp_types.h"

/* stall until prior PXP and MFX/HCP/HuC objects are completed */
#define MFX_WAIT_PXP (MFX_WAIT | \
		      MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \
		      MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG)

/*
 * Emit the command sequence that switches the session in use by the engine:
 * exit protected mode, select the session slot with MI_SET_APPID, then
 * re-enter protected mode, with MFX_WAITs serializing each step.
 */
static u32 *pxp_emit_session_selection(u32 *cs, u32 idx)
{
	*cs++ = MFX_WAIT_PXP;

	/* pxp off */
	*cs++ = MI_FLUSH_DW;
	*cs++ = 0;
	*cs++ = 0;

	/* select session */
	*cs++ = MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx);

	*cs++ = MFX_WAIT_PXP;

	/* pxp on */
	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_PROTECTED_MEM_EN |
		MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_PXP_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;

	*cs++ = MFX_WAIT_PXP;

	return cs;
}

static u32 *pxp_emit_inline_termination(u32 *cs)
{
	/* session inline termination */
	*cs++ = CRYPTO_KEY_EXCHANGE;
	*cs++ = 0;

	return cs;
}

static u32 *pxp_emit_session_termination(u32 *cs, u32 idx)
{
	cs = pxp_emit_session_selection(cs, idx);
	cs = pxp_emit_inline_termination(cs);

	return cs;
}

static u32 *pxp_emit_wait(u32 *cs)
{
	/* wait for cmds to go through */
	*cs++ = MFX_WAIT_PXP;
	*cs++ = 0;

	return cs;
}

/*
 * If we ever need to terminate more than one session, we can submit multiple
 * selections and terminations back-to-back with a single wait at the end,
 * as sketched below.
 */
#define SELECTION_LEN 10
#define TERMINATION_LEN 2
#define SESSION_TERMINATION_LEN(x) ((SELECTION_LEN + TERMINATION_LEN) * (x))
#define WAIT_LEN 2
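
/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * helper showing the batching described in the comment above, i.e. one
 * selection + inline termination per session followed by a single wait.
 * The corresponding ring allocation would be
 * SESSION_TERMINATION_LEN(n) + WAIT_LEN dwords.
 */
static u32 *pxp_emit_terminate_sessions(u32 *cs, const u32 *ids, u32 n)
{
	u32 i;

	for (i = 0; i < n; i++)
		cs = pxp_emit_session_termination(cs, ids[i]);

	return pxp_emit_wait(cs);
}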

/*
 * Equivalent to i915_request_add(), but the request is queued at maximum
 * priority so the termination jumps ahead of other submissions on the
 * context's timeline.
 */
static void pxp_request_commit(struct i915_request *rq)
{
	struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
	struct intel_timeline * const tl = i915_request_timeline(rq);

	lockdep_unpin_lock(&tl->mutex, rq->cookie);

	trace_i915_request_add(rq);
	__i915_request_commit(rq);
	__i915_request_queue(rq, &attr);

	mutex_unlock(&tl->mutex);
}

int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 id)
{
	struct i915_request *rq;
	struct intel_context *ce = pxp->ce;
	u32 *cs;
	int err = 0;

	if (!intel_pxp_is_enabled(pxp))
		return 0;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (ce->engine->emit_init_breadcrumb) {
		err = ce->engine->emit_init_breadcrumb(rq);
		if (err)
			goto out_rq;
	}

	cs = intel_ring_begin(rq, SESSION_TERMINATION_LEN(1) + WAIT_LEN);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto out_rq;
	}

	cs = pxp_emit_session_termination(cs, id);
	cs = pxp_emit_wait(cs);

	intel_ring_advance(rq, cs);

out_rq:
	i915_request_get(rq);

	if (unlikely(err))
		i915_request_set_error_once(rq, err);

	/* commit even on error so the timeline lock taken at creation is released */
	pxp_request_commit(rq);

	/* give the termination up to 200ms (HZ / 5) to complete */
	if (!err && i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;

	i915_request_put(rq);

	return err;
}
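
/*
 * Illustrative usage sketch, not part of the original file: a hypothetical
 * caller walking a bitmask of in-use session slots and terminating them one
 * by one. INTEL_PXP_MAX_HWDRM_SESSIONS comes from intel_pxp_types.h, and
 * for_each_set_bit() is assumed to be available via the usual bitops
 * includes; a timeout inside intel_pxp_terminate_session() is returned as
 * -ETIME.
 */
static int pxp_terminate_sessions_example(struct intel_pxp *pxp,
					  unsigned long *mask)
{
	u32 idx;
	int err;

	for_each_set_bit(idx, mask, INTEL_PXP_MAX_HWDRM_SESSIONS) {
		err = intel_pxp_terminate_session(pxp, idx);
		if (err)
			return err;
	}

	return 0;
}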