xref: /linux/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c (revision 47cebb740a83682224654a6583a20efd9f3cfeae)
// SPDX-License-Identifier: MIT
/*
 * Copyright 2023, Intel Corporation.
 */

#include <drm/drm_print.h>
#include <drm/intel/i915_hdcp_interface.h>
#include <linux/delay.h>

#include "abi/gsc_command_header_abi.h"
#include "intel_hdcp_gsc.h"
#include "intel_hdcp_gsc_message.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_force_wake.h"
#include "xe_gsc_proxy.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_uc_fw.h"

#define HECI_MEADDRESS_HDCP 18

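/*
 * Backing storage for HDCP command traffic with the GSC: a pinned,
 * GGTT-mapped BO whose first page holds the request and whose second page
 * receives the reply, plus the GGTT addresses of both halves.
 */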
struct intel_hdcp_gsc_message {
	struct xe_bo *hdcp_bo;
	u64 hdcp_cmd_in;
	u64 hdcp_cmd_out;
};

#define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)

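/*
 * HDCP2.x commands are routed through the GSC command streamer on display
 * version 14 (Meteor Lake display) and newer.
 */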
bool intel_hdcp_gsc_cs_required(struct xe_device *xe)
{
	return DISPLAY_VER(xe) >= 14;
}

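/*
 * Report whether the GSC is ready to service HDCP2.x requests: its
 * firmware must be enabled and the GSC proxy initialization must have
 * completed. Checking the latter requires runtime PM and GSC forcewake.
 */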
bool intel_hdcp_gsc_check_status(struct xe_device *xe)
{
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	struct xe_gt *gt = tile->media_gt;
	struct xe_gsc *gsc = &gt->uc.gsc;
	bool ret = true;

	if (!gsc || !xe_uc_fw_is_enabled(&gsc->fw)) {
		drm_dbg_kms(&xe->drm,
			    "GSC Components not ready for HDCP2.x\n");
		return false;
	}

	xe_pm_runtime_get(xe);
	if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)) {
		drm_dbg_kms(&xe->drm,
			    "failed to get forcewake to check proxy status\n");
		ret = false;
		goto out;
	}

	if (!xe_gsc_proxy_init_done(gsc))
		ret = false;

	xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
out:
	xe_pm_runtime_put(xe);
	return ret;
}

/* Allocate the memory for the command that we will send to the GSC CS */
static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
					     struct intel_hdcp_gsc_message *hdcp_message)
{
	struct xe_bo *bo = NULL;
	u64 cmd_in, cmd_out;
	int ret = 0;

	/* Allocate a two-page object for the HDCP command memory and store it */
	bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM |
				  XE_BO_FLAG_GGTT);

	if (IS_ERR(bo)) {
		drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
		ret = PTR_ERR(bo);
		goto out;
	}

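	/* Requests occupy the first page of the BO, replies the second */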
	cmd_in = xe_bo_ggtt_addr(bo);
	cmd_out = cmd_in + PAGE_SIZE;
	xe_map_memset(xe, &bo->vmap, 0, 0, bo->size);

	hdcp_message->hdcp_bo = bo;
	hdcp_message->hdcp_cmd_in = cmd_in;
	hdcp_message->hdcp_cmd_out = cmd_out;
out:
	return ret;
}

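/*
 * One-time allocation of the HDCP command buffer; it is stashed in
 * xe->display.hdcp.hdcp_message and reused for every HDCP2.x message.
 */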
static int intel_hdcp_gsc_hdcp2_init(struct xe_device *xe)
{
	struct intel_hdcp_gsc_message *hdcp_message;
	int ret;

	hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);

	if (!hdcp_message)
		return -ENOMEM;

	/*
	 * NOTE: No need to take hdcp_mutex here, the caller already
	 * holds it around this function.
	 */
	ret = intel_hdcp_gsc_initialize_message(xe, hdcp_message);
	if (ret) {
		drm_err(&xe->drm, "Could not initialize hdcp_message\n");
		kfree(hdcp_message);
		return ret;
	}

	xe->display.hdcp.hdcp_message = hdcp_message;
	return ret;
}

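/*
 * HDCP2.x arbiter ops backed by the GSC firmware, registered with the
 * common display HDCP code via intel_hdcp_gsc_init().
 */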
static const struct i915_hdcp_ops gsc_hdcp_ops = {
	.initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
	.verify_receiver_cert_prepare_km =
				intel_hdcp_gsc_verify_receiver_cert_prepare_km,
	.verify_hprime = intel_hdcp_gsc_verify_hprime,
	.store_pairing_info = intel_hdcp_gsc_store_pairing_info,
	.initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
	.verify_lprime = intel_hdcp_gsc_verify_lprime,
	.get_session_key = intel_hdcp_gsc_get_session_key,
	.repeater_check_flow_prepare_ack =
				intel_hdcp_gsc_repeater_check_flow_prepare_ack,
	.verify_mprime = intel_hdcp_gsc_verify_mprime,
	.enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
	.close_hdcp_session = intel_hdcp_gsc_close_session,
};

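/*
 * Set up the GSC-backed HDCP arbiter and the command buffer used to talk
 * to the GSC firmware.
 */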
int intel_hdcp_gsc_init(struct xe_device *xe)
{
	struct i915_hdcp_arbiter *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	mutex_lock(&xe->display.hdcp.hdcp_mutex);
	xe->display.hdcp.arbiter = data;
	xe->display.hdcp.arbiter->hdcp_dev = xe->drm.dev;
	xe->display.hdcp.arbiter->ops = &gsc_hdcp_ops;
	ret = intel_hdcp_gsc_hdcp2_init(xe);
	if (ret) {
		xe->display.hdcp.arbiter = NULL;
		kfree(data);
	}

	mutex_unlock(&xe->display.hdcp.hdcp_mutex);

	return ret;
}

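/*
 * Tear down the arbiter and the command buffer created by
 * intel_hdcp_gsc_init().
 */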
void intel_hdcp_gsc_fini(struct xe_device *xe)
{
	struct intel_hdcp_gsc_message *hdcp_message =
					xe->display.hdcp.hdcp_message;
	struct i915_hdcp_arbiter *arb = xe->display.hdcp.arbiter;

	if (hdcp_message) {
		xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
		kfree(hdcp_message);
		xe->display.hdcp.hdcp_message = NULL;
	}

	kfree(arb);
	xe->display.hdcp.arbiter = NULL;
}

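/*
 * Submit one already-formatted command to the GSC and read back the reply
 * header. Returns -EAGAIN when the firmware still reports the request as
 * pending, so the caller can resubmit.
 */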
static int xe_gsc_send_sync(struct xe_device *xe,
			    struct intel_hdcp_gsc_message *hdcp_message,
			    u32 msg_size_in, u32 msg_size_out,
			    u32 addr_out_off)
{
	struct xe_gt *gt = hdcp_message->hdcp_bo->tile->media_gt;
	struct iosys_map *map = &hdcp_message->hdcp_bo->vmap;
	struct xe_gsc *gsc = &gt->uc.gsc;
	int ret;

	ret = xe_gsc_pkt_submit_kernel(gsc, hdcp_message->hdcp_cmd_in, msg_size_in,
				       hdcp_message->hdcp_cmd_out, msg_size_out);
	if (ret) {
		drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", ret);
		return ret;
	}

	if (xe_gsc_check_and_update_pending(xe, map, 0, map, addr_out_off))
		return -EAGAIN;

	ret = xe_gsc_read_out_header(xe, map, addr_out_off,
				     sizeof(struct hdcp_cmd_header), NULL);

	return ret;
}

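/*
 * Exchange one HDCP2.x message with the GSC firmware: emit the GSC MTL
 * header, copy the payload into the shared buffer, submit, and copy the
 * reply back out. The submission is retried while the firmware keeps
 * reporting the request as pending.
 */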
ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
				size_t msg_in_len, u8 *msg_out,
				size_t msg_out_len)
{
	const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
	struct intel_hdcp_gsc_message *hdcp_message;
	u64 host_session_id;
	u32 msg_size_in, msg_size_out;
	u32 addr_out_off, addr_in_wr_off = 0;
	int ret, tries = 0;

	if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
		return -ENOSPC;

	msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
	msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
	hdcp_message = xe->display.hdcp.hdcp_message;
	addr_out_off = PAGE_SIZE;

	host_session_id = xe_gsc_create_host_session_id();
	xe_pm_runtime_get_noresume(xe);
	addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
					    addr_in_wr_off, HECI_MEADDRESS_HDCP,
					    host_session_id, msg_in_len);
	xe_map_memcpy_to(xe, &hdcp_message->hdcp_bo->vmap, addr_in_wr_off,
			 msg_in, msg_in_len);
	/*
	 * Keep resending the request while the pending bit is set. There is
	 * no need to add the message handle: we reuse the same address, so
	 * the header location is unchanged and already contains the handle.
	 * Retry the message up to 20 times, 50 ms apart.
	 */
	do {
		ret = xe_gsc_send_sync(xe, hdcp_message, msg_size_in, msg_size_out,
				       addr_out_off);

		/* Only try again if the GSC says so */
		if (ret != -EAGAIN)
			break;

		msleep(50);

	} while (++tries < 20);

	if (ret)
		goto out;

	xe_map_memcpy_from(xe, msg_out, &hdcp_message->hdcp_bo->vmap,
			   addr_out_off + HDCP_GSC_HEADER_SIZE,
			   msg_out_len);

out:
	xe_pm_runtime_put(xe);
	return ret;
}