xref: /linux/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c (revision fcab107abe1ab5be9dbe874baa722372da8f4f73)
// SPDX-License-Identifier: MIT
/*
 * Copyright 2023, Intel Corporation.
 */

#include <drm/intel/i915_hdcp_interface.h>

#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
#include "i915_drv.h"
#include "i915_utils.h"
#include "intel_hdcp_gsc.h"

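/*
 * Backing storage for HDCP messages exchanged with the GSC CS: a GGTT-pinned,
 * two-page buffer, with hdcp_cmd_in pointing at the request page and
 * hdcp_cmd_out at the reply page.
 */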
struct intel_hdcp_gsc_context {
	struct drm_i915_private *i915;
	struct i915_vma *vma;
	void *hdcp_cmd_in;
	void *hdcp_cmd_out;
};

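/*
 * intel_hdcp_gsc_check_status - report whether the GSC firmware on the media
 * GT is loaded and running, i.e. whether the GSC components required for
 * HDCP 2.2 are available.
 */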
bool intel_hdcp_gsc_check_status(struct drm_device *drm)
{
	struct drm_i915_private *i915 = to_i915(drm);
	struct intel_gt *gt = i915->media_gt;
	struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL;

	if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) {
		drm_dbg_kms(&i915->drm,
			    "GSC components required for HDCP2.2 are not ready\n");
		return false;
	}

	return true;
}

/* This function allocates memory for the command that we will send to the GSC CS */
static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
					     struct intel_hdcp_gsc_context *gsc_context)
{
	struct intel_gt *gt = i915->media_gt;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_vma *vma = NULL;
	void *cmd_in, *cmd_out;
	int err;

	/* allocate a two-page object for the HDCP command memory and store it */
	obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE);

	if (IS_ERR(obj)) {
		drm_err(&i915->drm, "Failed to allocate HDCP streaming command!\n");
		return PTR_ERR(obj);
	}

	cmd_in = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true));
	if (IS_ERR(cmd_in)) {
		drm_err(&i915->drm, "Failed to map gsc message page!\n");
		err = PTR_ERR(cmd_in);
		goto out_unpin;
	}

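	/* the second page of the two-page object is used for the GSC reply */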
	cmd_out = cmd_in + PAGE_SIZE;

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_unmap;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto out_unmap;

	memset(cmd_in, 0, obj->base.size);

	gsc_context->hdcp_cmd_in = cmd_in;
	gsc_context->hdcp_cmd_out = cmd_out;
	gsc_context->vma = vma;
	gsc_context->i915 = i915;

	return 0;

out_unmap:
	i915_gem_object_unpin_map(obj);
out_unpin:
	i915_gem_object_put(obj);
	return err;
}

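/*
 * intel_hdcp_gsc_context_alloc - allocate a context together with the
 * GGTT-pinned buffer used for HDCP message submission to the GSC.
 * Returns an ERR_PTR on failure.
 */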
struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm)
{
	struct drm_i915_private *i915 = to_i915(drm);
	struct intel_hdcp_gsc_context *gsc_context;
	int ret;

	gsc_context = kzalloc(sizeof(*gsc_context), GFP_KERNEL);
	if (!gsc_context)
		return ERR_PTR(-ENOMEM);

	/*
	 * NOTE: No need to lock the comp mutex here as it is already
	 * going to be taken before this function is called.
	 */
	ret = intel_hdcp_gsc_initialize_message(i915, gsc_context);
	if (ret) {
		drm_err(&i915->drm, "Could not initialize gsc_context\n");
		kfree(gsc_context);
		gsc_context = ERR_PTR(ret);
	}

	return gsc_context;
}

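/*
 * intel_hdcp_gsc_context_free - unpin and release the command buffer mapping
 * and free the context. Passing NULL is a no-op.
 */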
void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context)
{
	if (!gsc_context)
		return;

	i915_vma_unpin_and_release(&gsc_context->vma, I915_VMA_RELEASE_MAP);
	kfree(gsc_context);
}

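/*
 * Submit a single HECI packet to the GSC and validate the returned header:
 * -EINVAL on a bad validity marker or error status, -EAGAIN when the GSC
 * flags the message as still pending.
 */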
static int intel_gsc_send_sync(struct drm_i915_private *i915,
			       struct intel_gsc_mtl_header *header_in,
			       struct intel_gsc_mtl_header *header_out,
			       u64 addr_in, u64 addr_out,
			       size_t msg_out_len)
{
	struct intel_gt *gt = i915->media_gt;
	int ret;

	ret = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc, addr_in,
						  header_in->message_size,
						  addr_out,
						  msg_out_len + sizeof(*header_out));
	if (ret) {
		drm_err(&i915->drm, "failed to send gsc HDCP msg (%d)\n", ret);
		return ret;
	}

	/*
	 * Check the validity marker and the header status to see if some error
	 * has blocked us from sending the message to the GSC CS
	 */
	if (header_out->validity_marker != GSC_HECI_VALIDITY_MARKER) {
		drm_err(&i915->drm, "invalid validity marker\n");
		return -EINVAL;
	}

	if (header_out->status != 0) {
		drm_err(&i915->drm, "header status indicates error %d\n",
			header_out->status);
		return -EINVAL;
	}

	if (header_out->flags & GSC_OUTFLAG_MSG_PENDING) {
		header_in->gsc_message_handle = header_out->gsc_message_handle;
		return -EAGAIN;
	}

	return 0;
}

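/*
 * A minimal usage sketch for the helpers above (hypothetical caller; the drm
 * pointer, message buffers and lengths are illustrative only):
 *
 *	struct intel_hdcp_gsc_context *ctx;
 *	ssize_t ret;
 *
 *	ctx = intel_hdcp_gsc_context_alloc(drm);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	ret = intel_hdcp_gsc_msg_send(ctx, msg_in, msg_in_len,
 *				      msg_out, msg_out_len);
 *	intel_hdcp_gsc_context_free(ctx);
 */
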
/*
 * This function can be used both for sending requests and for receiving the
 * reply messages, hence no separate message-retrieval function is required.
 * We use the already initialized intel_hdcp_gsc_context, add the GSC CS
 * memory header as stated in the specs, after which the normal HDCP payload
 * follows.
 */
ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
				void *msg_in, size_t msg_in_len,
				void *msg_out, size_t msg_out_len)
{
	struct drm_i915_private *i915 = gsc_context->i915;
	struct intel_gt *gt = i915->media_gt;
	struct intel_gsc_mtl_header *header_in, *header_out;
	const size_t max_msg_size = PAGE_SIZE - sizeof(*header_in);
	u64 addr_in, addr_out, host_session_id;
	u32 reply_size, msg_size_in, msg_size_out;
	int ret, tries = 0;

	if (!intel_uc_uses_gsc_uc(&gt->uc))
		return -ENODEV;

	if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
		return -ENOSPC;

	msg_size_in = msg_in_len + sizeof(*header_in);
	msg_size_out = msg_out_len + sizeof(*header_out);
	header_in = gsc_context->hdcp_cmd_in;
	header_out = gsc_context->hdcp_cmd_out;
	addr_in = i915_ggtt_offset(gsc_context->vma);
	addr_out = addr_in + PAGE_SIZE;

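	/* build the request in the input page: MTL header followed by the HDCP payload */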
	memset(header_in, 0, msg_size_in);
	memset(header_out, 0, msg_size_out);
	get_random_bytes(&host_session_id, sizeof(u64));
	intel_gsc_uc_heci_cmd_emit_mtl_header(header_in, HECI_MEADDRESS_HDCP,
					      msg_size_in, host_session_id);
	memcpy(gsc_context->hdcp_cmd_in + sizeof(*header_in), msg_in, msg_in_len);

	/*
	 * Keep resending the request while the pending bit is set: there is no
	 * need to copy in the message handle, as we reuse the same address and
	 * hence the header location is unchanged and already contains the
	 * handle. The message is sent up to 20 times, 50 ms apart.
	 */
	do {
		ret = intel_gsc_send_sync(i915, header_in, header_out, addr_in,
					  addr_out, msg_out_len);

		/* Only try again if gsc says so */
		if (ret != -EAGAIN)
			break;

		msleep(50);

	} while (++tries < 20);

	if (ret)
		goto err;

	/* we use the same memory for the reply, so the header is at the same location */
	reply_size = header_out->message_size - sizeof(*header_out);
	if (reply_size > msg_out_len) {
		drm_warn(&i915->drm, "caller with insufficient HDCP reply size %u (%d)\n",
			 reply_size, (u32)msg_out_len);
		reply_size = msg_out_len;
	} else if (reply_size != msg_out_len) {
		drm_dbg_kms(&i915->drm, "caller unexpected HDCP reply size %u (%d)\n",
			    reply_size, (u32)msg_out_len);
	}

	memcpy(msg_out, gsc_context->hdcp_cmd_out + sizeof(*header_out), msg_out_len);

err:
	return ret;
}