xref: /linux/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023, Intel Corporation.
4  */
5 
6 #include <drm/drm_print.h>
7 #include <drm/intel/i915_hdcp_interface.h>
8 
9 #include "gem/i915_gem_region.h"
10 #include "gt/intel_gt.h"
11 #include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
12 #include "i915_drv.h"
13 #include "i915_utils.h"
14 #include "intel_hdcp_gsc.h"
15 
struct intel_hdcp_gsc_context {
	struct drm_i915_private *i915;	/* device this context was allocated for */
	struct i915_vma *vma;		/* two-page GGTT-pinned buffer holding both messages */
	void *hdcp_cmd_in;		/* CPU map of page 0: command sent to the GSC */
	void *hdcp_cmd_out;		/* CPU map of page 1: reply written by the GSC */
};
22 
23 bool intel_hdcp_gsc_check_status(struct drm_device *drm)
24 {
25 	struct drm_i915_private *i915 = to_i915(drm);
26 	struct intel_gt *gt = i915->media_gt;
27 	struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL;
28 
29 	if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) {
30 		drm_dbg_kms(&i915->drm,
31 			    "GSC components required for HDCP2.2 are not ready\n");
32 		return false;
33 	}
34 
35 	return true;
36 }
37 
/* Allocate and map the buffer for the commands that we will send to the gsc cs */
static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
					     struct intel_hdcp_gsc_context *gsc_context)
{
	struct intel_gt *gt = i915->media_gt;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_vma *vma = NULL;
	void *cmd_in, *cmd_out;
	int err;

	/* allocate an object of two pages for HDCP command memory and store it */
	obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE);

	if (IS_ERR(obj)) {
		drm_err(&i915->drm, "Failed to allocate HDCP streaming command!\n");
		return PTR_ERR(obj);
	}

	/* CPU map of the whole object; the first page carries the command */
	cmd_in = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true));
	if (IS_ERR(cmd_in)) {
		drm_err(&i915->drm, "Failed to map gsc message page!\n");
		err = PTR_ERR(cmd_in);
		goto out_unpin;
	}

	/* the second page of the same mapping receives the reply */
	cmd_out = cmd_in + PAGE_SIZE;

	/* bind into the GGTT so the GSC engine can address the buffer */
	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_unmap;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto out_unmap;

	/* zero both pages (obj->base.size == 2 * PAGE_SIZE) */
	memset(cmd_in, 0, obj->base.size);

	gsc_context->hdcp_cmd_in = cmd_in;
	gsc_context->hdcp_cmd_out = cmd_out;
	gsc_context->vma = vma;
	gsc_context->i915 = i915;

	return 0;

	/* unwind in reverse order: drop the CPU map, then the object reference */
out_unmap:
	i915_gem_object_unpin_map(obj);
out_unpin:
	i915_gem_object_put(obj);
	return err;
}
90 
91 struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm)
92 {
93 	struct drm_i915_private *i915 = to_i915(drm);
94 	struct intel_hdcp_gsc_context *gsc_context;
95 	int ret;
96 
97 	gsc_context = kzalloc(sizeof(*gsc_context), GFP_KERNEL);
98 	if (!gsc_context)
99 		return ERR_PTR(-ENOMEM);
100 
101 	/*
102 	 * NOTE: No need to lock the comp mutex here as it is already
103 	 * going to be taken before this function called
104 	 */
105 	ret = intel_hdcp_gsc_initialize_message(i915, gsc_context);
106 	if (ret) {
107 		drm_err(&i915->drm, "Could not initialize gsc_context\n");
108 		kfree(gsc_context);
109 		gsc_context = ERR_PTR(ret);
110 	}
111 
112 	return gsc_context;
113 }
114 
115 void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context)
116 {
117 	if (!gsc_context)
118 		return;
119 
120 	i915_vma_unpin_and_release(&gsc_context->vma, I915_VMA_RELEASE_MAP);
121 	kfree(gsc_context);
122 }
123 
124 static int intel_gsc_send_sync(struct drm_i915_private *i915,
125 			       struct intel_gsc_mtl_header *header_in,
126 			       struct intel_gsc_mtl_header *header_out,
127 			       u64 addr_in, u64 addr_out,
128 			       size_t msg_out_len)
129 {
130 	struct intel_gt *gt = i915->media_gt;
131 	int ret;
132 
133 	ret = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc, addr_in,
134 						  header_in->message_size,
135 						  addr_out,
136 						  msg_out_len + sizeof(*header_out));
137 	if (ret) {
138 		drm_err(&i915->drm, "failed to send gsc HDCP msg (%d)\n", ret);
139 		return ret;
140 	}
141 
142 	/*
143 	 * Checking validity marker and header status to see if some error has
144 	 * blocked us from sending message to gsc cs
145 	 */
146 	if (header_out->validity_marker != GSC_HECI_VALIDITY_MARKER) {
147 		drm_err(&i915->drm, "invalid validity marker\n");
148 		return -EINVAL;
149 	}
150 
151 	if (header_out->status != 0) {
152 		drm_err(&i915->drm, "header status indicates error %d\n",
153 			header_out->status);
154 		return -EINVAL;
155 	}
156 
157 	if (header_out->flags & GSC_OUTFLAG_MSG_PENDING) {
158 		header_in->gsc_message_handle = header_out->gsc_message_handle;
159 		return -EAGAIN;
160 	}
161 
162 	return 0;
163 }
164 
/*
 * This function handles sending a request and receiving the reply within
 * the same call, so no separate message-retrieval function is required.
 * The caller-provided intel_hdcp_gsc_context supplies the buffers; a gsc
 * cs memory header (as stated in the specs) is written first, after which
 * the normal HDCP payload follows.
 */
ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
				void *msg_in, size_t msg_in_len,
				void *msg_out, size_t msg_out_len)
{
	struct drm_i915_private *i915 = gsc_context->i915;
	struct intel_gt *gt = i915->media_gt;
	struct intel_gsc_mtl_header *header_in, *header_out;
	/* each direction owns one page, with its MTL header carved out of it */
	const size_t max_msg_size = PAGE_SIZE - sizeof(*header_in);
	u64 addr_in, addr_out, host_session_id;
	u32 reply_size, msg_size_in, msg_size_out;
	int ret, tries = 0;

	if (!intel_uc_uses_gsc_uc(&gt->uc))
		return -ENODEV;

	if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
		return -ENOSPC;

	msg_size_in = msg_in_len + sizeof(*header_in);
	msg_size_out = msg_out_len + sizeof(*header_out);
	/* headers alias the start of each page of the context's buffer */
	header_in = gsc_context->hdcp_cmd_in;
	header_out = gsc_context->hdcp_cmd_out;
	/* GGTT addresses of the two consecutive pages of the pinned vma */
	addr_in = i915_ggtt_offset(gsc_context->vma);
	addr_out = addr_in + PAGE_SIZE;

	memset(header_in, 0, msg_size_in);
	memset(header_out, 0, msg_size_out);
	/* random session id identifies this exchange to the GSC firmware */
	get_random_bytes(&host_session_id, sizeof(u64));
	intel_gsc_uc_heci_cmd_emit_mtl_header(header_in, HECI_MEADDRESS_HDCP,
					      msg_size_in, host_session_id);
	/* HDCP payload follows the header in the command page */
	memcpy(gsc_context->hdcp_cmd_in + sizeof(*header_in), msg_in, msg_in_len);

	/*
	 * Keep sending request in case the pending bit is set no need to add
	 * message handle as we are using same address hence loc. of header is
	 * same and it will contain the message handle. we will send the message
	 * 20 times each message 50 ms apart
	 */
	do {
		ret = intel_gsc_send_sync(i915, header_in, header_out, addr_in,
					  addr_out, msg_out_len);

		/* Only try again if gsc says so */
		if (ret != -EAGAIN)
			break;

		msleep(50);

	} while (++tries < 20);

	if (ret)
		goto err;

	/* we use the same mem for the reply, so header is in the same loc */
	reply_size = header_out->message_size - sizeof(*header_out);
	if (reply_size > msg_out_len) {
		/* clamp so we never copy past the caller's buffer */
		drm_warn(&i915->drm, "caller with insufficient HDCP reply size %u (%d)\n",
			 reply_size, (u32)msg_out_len);
		reply_size = msg_out_len;
	} else if (reply_size != msg_out_len) {
		drm_dbg_kms(&i915->drm, "caller unexpected HCDP reply size %u (%d)\n",
			    reply_size, (u32)msg_out_len);
	}

	/*
	 * NOTE(review): always copies msg_out_len bytes, not reply_size; when
	 * the reply was shorter, the tail comes from the out page that was
	 * memset to zero above. Returns 0 on success (despite the ssize_t
	 * return type) or a negative error code.
	 */
	memcpy(msg_out, gsc_context->hdcp_cmd_out + sizeof(*header_out), msg_out_len);

err:
	return ret;
}
241