// SPDX-License-Identifier: MIT
/*
 * Copyright 2023, Intel Corporation.
 */

#include <drm/drm_print.h>
#include <drm/i915_hdcp_interface.h>
#include <linux/delay.h>

#include "abi/gsc_command_header_abi.h"
#include "intel_hdcp_gsc.h"
#include "intel_hdcp_gsc_message.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_gsc_proxy.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_uc_fw.h"

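/* HECI client address of the HDCP application in the GSC firmware */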
#define HECI_MEADDRESS_HDCP 18

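/*
 * Bookkeeping for the GSC HDCP command buffer: a single pinned BO backs both
 * the request and the reply, with hdcp_cmd_in and hdcp_cmd_out holding the
 * GGTT addresses of the two halves.
 */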
struct intel_hdcp_gsc_message {
	struct xe_bo *hdcp_bo;
	u64 hdcp_cmd_in;
	u64 hdcp_cmd_out;
};

#define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)

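/* Display version 14 and newer use the GSC command streamer for HDCP messages */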
bool intel_hdcp_gsc_cs_required(struct xe_device *xe)
{
	return DISPLAY_VER(xe) >= 14;
}

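/*
 * HDCP over GSC is only usable once the GSC firmware is loaded and its
 * proxy initialization has completed.
 */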
bool intel_hdcp_gsc_check_status(struct xe_device *xe)
{
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	struct xe_gt *gt = tile->media_gt;
	bool ret = true;

	if (!xe_uc_fw_is_enabled(&gt->uc.gsc.fw))
		return false;

	xe_pm_runtime_get(xe);
	if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)) {
		drm_dbg_kms(&xe->drm,
			    "failed to get forcewake to check proxy status\n");
		ret = false;
		goto out;
	}

	if (!xe_gsc_proxy_init_done(&gt->uc.gsc))
		ret = false;

	xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
out:
	xe_pm_runtime_put(xe);
	return ret;
}

/* Allocate the memory for the command that will be sent to the GSC CS */
static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
					     struct intel_hdcp_gsc_message *hdcp_message)
{
	struct xe_bo *bo = NULL;
	u64 cmd_in, cmd_out;
	int ret = 0;

	/* allocate a two-page object for the HDCP command memory and store it */
	bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM |
				  XE_BO_FLAG_GGTT);

	if (IS_ERR(bo)) {
		drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
		ret = PTR_ERR(bo);
		goto out;
	}

	cmd_in = xe_bo_ggtt_addr(bo);
	cmd_out = cmd_in + PAGE_SIZE;
	xe_map_memset(xe, &bo->vmap, 0, 0, bo->size);

	hdcp_message->hdcp_bo = bo;
	hdcp_message->hdcp_cmd_in = cmd_in;
	hdcp_message->hdcp_cmd_out = cmd_out;
out:
	return ret;
}

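/* Allocate the HDCP message buffer that is reused for every GSC HDCP command */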
static int intel_hdcp_gsc_hdcp2_init(struct xe_device *xe)
{
	struct intel_hdcp_gsc_message *hdcp_message;
	int ret;

	hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);

	if (!hdcp_message)
		return -ENOMEM;

	/*
	 * NOTE: No need to take the comp mutex here as it is already
	 * held before this function is called.
	 */
	ret = intel_hdcp_gsc_initialize_message(xe, hdcp_message);
	if (ret) {
		drm_err(&xe->drm, "Could not initialize hdcp_message\n");
		kfree(hdcp_message);
		return ret;
	}

	xe->display.hdcp.hdcp_message = hdcp_message;
	return ret;
}

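/* HDCP 2.x message handlers called from the shared display HDCP code */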
static const struct i915_hdcp_ops gsc_hdcp_ops = {
	.initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
	.verify_receiver_cert_prepare_km =
				intel_hdcp_gsc_verify_receiver_cert_prepare_km,
	.verify_hprime = intel_hdcp_gsc_verify_hprime,
	.store_pairing_info = intel_hdcp_gsc_store_pairing_info,
	.initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
	.verify_lprime = intel_hdcp_gsc_verify_lprime,
	.get_session_key = intel_hdcp_gsc_get_session_key,
	.repeater_check_flow_prepare_ack =
				intel_hdcp_gsc_repeater_check_flow_prepare_ack,
	.verify_mprime = intel_hdcp_gsc_verify_mprime,
	.enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
	.close_hdcp_session = intel_hdcp_gsc_close_session,
};

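/*
 * Register the GSC-backed HDCP arbiter with the display code and set up the
 * shared HDCP message buffer.
 */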
int intel_hdcp_gsc_init(struct xe_device *xe)
{
	struct i915_hdcp_arbiter *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	mutex_lock(&xe->display.hdcp.hdcp_mutex);
	xe->display.hdcp.arbiter = data;
	xe->display.hdcp.arbiter->hdcp_dev = xe->drm.dev;
	xe->display.hdcp.arbiter->ops = &gsc_hdcp_ops;
	ret = intel_hdcp_gsc_hdcp2_init(xe);
	if (ret) {
		/* don't leave a dangling pointer to the freed arbiter behind */
		xe->display.hdcp.arbiter = NULL;
		kfree(data);
	}

	mutex_unlock(&xe->display.hdcp.hdcp_mutex);

	return ret;
}

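/* Release the HDCP message buffer allocated at init time */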
void intel_hdcp_gsc_fini(struct xe_device *xe)
{
	struct intel_hdcp_gsc_message *hdcp_message =
					xe->display.hdcp.hdcp_message;

	if (!hdcp_message)
		return;

	xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
	kfree(hdcp_message);
}

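/*
 * Submit the already staged command buffer to the GSC and decode the reply
 * header; -EAGAIN is returned if the firmware reports the request as pending.
 */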
static int xe_gsc_send_sync(struct xe_device *xe,
			    struct intel_hdcp_gsc_message *hdcp_message,
			    u32 msg_size_in, u32 msg_size_out,
			    u32 addr_out_off)
{
	struct xe_gt *gt = hdcp_message->hdcp_bo->tile->media_gt;
	struct iosys_map *map = &hdcp_message->hdcp_bo->vmap;
	struct xe_gsc *gsc = &gt->uc.gsc;
	int ret;

	ret = xe_gsc_pkt_submit_kernel(gsc, hdcp_message->hdcp_cmd_in, msg_size_in,
				       hdcp_message->hdcp_cmd_out, msg_size_out);
	if (ret) {
		drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", ret);
		return ret;
	}

	if (xe_gsc_check_and_update_pending(xe, map, 0, map, addr_out_off))
		return -EAGAIN;

	ret = xe_gsc_read_out_header(xe, map, addr_out_off,
				     sizeof(struct hdcp_cmd_header), NULL);

	return ret;
}

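/*
 * Copy msg_in into the command buffer, send it to the GSC firmware and, on
 * success, copy the reply back into msg_out.
 */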
ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
				size_t msg_in_len, u8 *msg_out,
				size_t msg_out_len)
{
	const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
	struct intel_hdcp_gsc_message *hdcp_message;
	u64 host_session_id;
	u32 msg_size_in, msg_size_out;
	u32 addr_out_off, addr_in_wr_off = 0;
	int ret, tries = 0;

	/* bail out directly, no runtime PM reference has been taken yet */
	if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
		return -ENOSPC;

	msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
	msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
	hdcp_message = xe->display.hdcp.hdcp_message;
	addr_out_off = PAGE_SIZE;

	host_session_id = xe_gsc_create_host_session_id();
	xe_pm_runtime_get_noresume(xe);
	addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
					    addr_in_wr_off, HECI_MEADDRESS_HDCP,
					    host_session_id, msg_in_len);
	xe_map_memcpy_to(xe, &hdcp_message->hdcp_bo->vmap, addr_in_wr_off,
			 msg_in, msg_in_len);
	/*
	 * Keep resending the request while the firmware reports it as pending.
	 * There is no need to add a message handle: the same address is reused,
	 * so the header location stays the same and already contains the handle.
	 * The message is sent up to 20 times, 50 ms apart.
	 */
	do {
		ret = xe_gsc_send_sync(xe, hdcp_message, msg_size_in, msg_size_out,
				       addr_out_off);

		/* Only try again if gsc says so */
		if (ret != -EAGAIN)
			break;

		msleep(50);

	} while (++tries < 20);

	if (ret)
		goto out;

	xe_map_memcpy_from(xe, msg_out, &hdcp_message->hdcp_bo->vmap,
			   addr_out_off + HDCP_GSC_HEADER_SIZE,
			   msg_out_len);

out:
	xe_pm_runtime_put(xe);
	return ret;
}
253