xref: /linux/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c (revision a3a02a52bcfcbcc4a637d4b68bf1bc391c9fad02)
// SPDX-License-Identifier: MIT
/*
 * Copyright 2023, Intel Corporation.
 */

#include <drm/drm_print.h>
#include <drm/intel/i915_hdcp_interface.h>
#include <linux/delay.h>

#include "abi/gsc_command_header_abi.h"
#include "intel_hdcp_gsc.h"
#include "intel_hdcp_gsc_message.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_force_wake.h"
#include "xe_gsc_proxy.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_uc_fw.h"

#define HECI_MEADDRESS_HDCP 18

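/*
 * Tracks the single buffer object shared by all HDCP GSC commands, together
 * with the GGTT addresses of its command input and output areas.
 */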
struct intel_hdcp_gsc_message {
	struct xe_bo *hdcp_bo;
	u64 hdcp_cmd_in;
	u64 hdcp_cmd_out;
};

#define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)

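/*
 * On display version 14+ (Meteor Lake and newer) HDCP 2.x messages are routed
 * to the GSC firmware through the GSC command streamer; earlier platforms use
 * the MEI interface instead.
 */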
bool intel_hdcp_gsc_cs_required(struct xe_device *xe)
{
	return DISPLAY_VER(xe) >= 14;
}

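/*
 * HDCP commands can only be serviced once the GSC firmware is enabled and its
 * SW proxy has finished initialization; both checks are done under GSC
 * forcewake with a runtime PM reference held.
 */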
bool intel_hdcp_gsc_check_status(struct xe_device *xe)
{
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	struct xe_gt *gt = tile->media_gt;
	bool ret = true;

	if (!xe_uc_fw_is_enabled(&gt->uc.gsc.fw))
		return false;

	xe_pm_runtime_get(xe);
	if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)) {
		drm_dbg_kms(&xe->drm,
			    "failed to get forcewake to check proxy status\n");
		ret = false;
		goto out;
	}

	if (!xe_gsc_proxy_init_done(&gt->uc.gsc))
		ret = false;

	xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
out:
	xe_pm_runtime_put(xe);
	return ret;
}

/* This function allocates the memory for the commands we send to the GSC CS */
static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
					     struct intel_hdcp_gsc_message *hdcp_message)
{
	struct xe_bo *bo = NULL;
	u64 cmd_in, cmd_out;
	int ret = 0;

	/* allocate a two-page object for the HDCP command memory and store it */
	bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM |
				  XE_BO_FLAG_GGTT);

	if (IS_ERR(bo)) {
		drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
		ret = PTR_ERR(bo);
		goto out;
	}

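	/* the command input occupies the first page of the BO, the reply the second */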
	cmd_in = xe_bo_ggtt_addr(bo);
	cmd_out = cmd_in + PAGE_SIZE;
	xe_map_memset(xe, &bo->vmap, 0, 0, bo->size);

	hdcp_message->hdcp_bo = bo;
	hdcp_message->hdcp_cmd_in = cmd_in;
	hdcp_message->hdcp_cmd_out = cmd_out;
out:
	return ret;
}

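/*
 * Set up the persistent command buffer used for all HDCP 2.x exchanges and
 * publish it through xe->display.hdcp.hdcp_message.
 */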
static int intel_hdcp_gsc_hdcp2_init(struct xe_device *xe)
{
	struct intel_hdcp_gsc_message *hdcp_message;
	int ret;

	hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);

	if (!hdcp_message)
		return -ENOMEM;

	/*
	 * NOTE: No need to lock the comp mutex here as it is already
	 * going to be taken before this function is called.
	 */
	ret = intel_hdcp_gsc_initialize_message(xe, hdcp_message);
	if (ret) {
		drm_err(&xe->drm, "Could not initialize hdcp_message\n");
		kfree(hdcp_message);
		return ret;
	}

	xe->display.hdcp.hdcp_message = hdcp_message;
	return ret;
}

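/*
 * Callback table handed to the shared display HDCP code; each hook wraps one
 * of the HDCP 2.x message helpers declared in intel_hdcp_gsc_message.h.
 */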
static const struct i915_hdcp_ops gsc_hdcp_ops = {
	.initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
	.verify_receiver_cert_prepare_km =
				intel_hdcp_gsc_verify_receiver_cert_prepare_km,
	.verify_hprime = intel_hdcp_gsc_verify_hprime,
	.store_pairing_info = intel_hdcp_gsc_store_pairing_info,
	.initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
	.verify_lprime = intel_hdcp_gsc_verify_lprime,
	.get_session_key = intel_hdcp_gsc_get_session_key,
	.repeater_check_flow_prepare_ack =
				intel_hdcp_gsc_repeater_check_flow_prepare_ack,
	.verify_mprime = intel_hdcp_gsc_verify_mprime,
	.enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
	.close_hdcp_session = intel_hdcp_gsc_close_session,
};

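/*
 * Register the GSC-backed arbiter with the shared display HDCP code and
 * allocate the command buffer it will use.
 */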
int intel_hdcp_gsc_init(struct xe_device *xe)
{
	struct i915_hdcp_arbiter *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	mutex_lock(&xe->display.hdcp.hdcp_mutex);
	xe->display.hdcp.arbiter = data;
	xe->display.hdcp.arbiter->hdcp_dev = xe->drm.dev;
	xe->display.hdcp.arbiter->ops = &gsc_hdcp_ops;
	ret = intel_hdcp_gsc_hdcp2_init(xe);
	if (ret)
		kfree(data);

	mutex_unlock(&xe->display.hdcp.hdcp_mutex);

	return ret;
}

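/*
 * Tear down in the reverse order of intel_hdcp_gsc_init(): release the pinned
 * command BO first, then free the arbiter.
 */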
void intel_hdcp_gsc_fini(struct xe_device *xe)
{
	struct intel_hdcp_gsc_message *hdcp_message =
					xe->display.hdcp.hdcp_message;
	struct i915_hdcp_arbiter *arb = xe->display.hdcp.arbiter;

	if (hdcp_message) {
		xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
		kfree(hdcp_message);
		xe->display.hdcp.hdcp_message = NULL;
	}

	kfree(arb);
	xe->display.hdcp.arbiter = NULL;
}

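/*
 * Submit the prepared command to the GSC and parse the reply: -EAGAIN is
 * returned when the firmware left the pending bit set and the caller should
 * resubmit, any other error is fatal for this message.
 */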
static int xe_gsc_send_sync(struct xe_device *xe,
			    struct intel_hdcp_gsc_message *hdcp_message,
			    u32 msg_size_in, u32 msg_size_out,
			    u32 addr_out_off)
{
	struct xe_gt *gt = hdcp_message->hdcp_bo->tile->media_gt;
	struct iosys_map *map = &hdcp_message->hdcp_bo->vmap;
	struct xe_gsc *gsc = &gt->uc.gsc;
	int ret;

	ret = xe_gsc_pkt_submit_kernel(gsc, hdcp_message->hdcp_cmd_in, msg_size_in,
				       hdcp_message->hdcp_cmd_out, msg_size_out);
	if (ret) {
		drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", ret);
		return ret;
	}

	if (xe_gsc_check_and_update_pending(xe, map, 0, map, addr_out_off))
		return -EAGAIN;

	ret = xe_gsc_read_out_header(xe, map, addr_out_off,
				     sizeof(struct hdcp_cmd_header), NULL);

	return ret;
}

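/*
 * Send one HDCP message to the GSC and copy back the reply. The payload is
 * wrapped in an intel_gsc_mtl_header, so both directions are limited to
 * PAGE_SIZE - HDCP_GSC_HEADER_SIZE bytes.
 */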
ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
				size_t msg_in_len, u8 *msg_out,
				size_t msg_out_len)
{
	const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
	struct intel_hdcp_gsc_message *hdcp_message;
	u64 host_session_id;
	u32 msg_size_in, msg_size_out;
	u32 addr_out_off, addr_in_wr_off = 0;
	int ret, tries = 0;

	/* neither direction may exceed what fits in one page alongside the header */
	if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
		return -ENOSPC;

	msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
	msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
	hdcp_message = xe->display.hdcp.hdcp_message;
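	/* the reply is written to the second page of the BO */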
	addr_out_off = PAGE_SIZE;

	host_session_id = xe_gsc_create_host_session_id();
	xe_pm_runtime_get_noresume(xe);
	addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
					    addr_in_wr_off, HECI_MEADDRESS_HDCP,
					    host_session_id, msg_in_len);
	xe_map_memcpy_to(xe, &hdcp_message->hdcp_bo->vmap, addr_in_wr_off,
			 msg_in, msg_in_len);
	/*
	 * Keep resending the request while the GSC reports the pending bit set.
	 * There is no need to add a message handle: we reuse the same address,
	 * so the header location is unchanged and already carries the handle.
	 * Retry the message up to 20 times, 50 ms apart.
	 */
	do {
		ret = xe_gsc_send_sync(xe, hdcp_message, msg_size_in, msg_size_out,
				       addr_out_off);

		/* Only try again if gsc says so */
		if (ret != -EAGAIN)
			break;

		msleep(50);

	} while (++tries < 20);

	if (ret)
		goto out;

	xe_map_memcpy_from(xe, msg_out, &hdcp_message->hdcp_bo->vmap,
			   addr_out_off + HDCP_GSC_HEADER_SIZE,
			   msg_out_len);

out:
	xe_pm_runtime_put(xe);
	return ret;
}