xref: /linux/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023, Intel Corporation.
4  */
5 
6 #include <linux/delay.h>
7 
8 #include <drm/drm_print.h>
9 #include <drm/intel/display_parent_interface.h>
10 #include <drm/intel/i915_hdcp_interface.h>
11 
12 #include "abi/gsc_command_header_abi.h"
13 #include "xe_bo.h"
14 #include "xe_device.h"
15 #include "xe_device_types.h"
16 #include "xe_force_wake.h"
17 #include "xe_gsc_proxy.h"
18 #include "xe_gsc_submit.h"
19 #include "xe_hdcp_gsc.h"
20 #include "xe_map.h"
21 #include "xe_pm.h"
22 #include "xe_uc_fw.h"
23 
24 #define HECI_MEADDRESS_HDCP 18
25 
/* Per-device context for submitting HDCP commands to the GSC firmware. */
struct intel_hdcp_gsc_context {
	struct xe_device *xe;
	struct xe_bo *hdcp_bo;	/* pinned two-page buffer: request page + reply page */
	u64 hdcp_cmd_in;	/* GGTT address of the request page (first page of hdcp_bo) */
	u64 hdcp_cmd_out;	/* GGTT address of the reply page (hdcp_cmd_in + PAGE_SIZE) */
};
32 
33 #define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)
34 
/*
 * Report whether the GSC is ready to service HDCP 2.x requests: firmware
 * must be available and the GSC proxy handshake completed.  Called through
 * xe_display_hdcp_interface.gsc_check_status.
 */
static bool intel_hdcp_gsc_check_status(struct drm_device *drm)
{
	struct xe_device *xe = to_xe_device(drm);
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	struct xe_gt *gt = tile->media_gt;
	struct xe_gsc *gsc = &gt->uc.gsc;

	/*
	 * NOTE(review): !gsc can never be true here (gsc points into *gt),
	 * so only the firmware-availability half of this test is live.
	 */
	if (!gsc || !xe_uc_fw_is_available(&gsc->fw)) {
		drm_dbg_kms(&xe->drm,
			    "GSC Components not ready for HDCP2.x\n");
		return false;
	}

	/*
	 * Scope-based cleanup: the runtime-PM ref and GSC forcewake taken
	 * below are released automatically when this function returns.
	 */
	guard(xe_pm_runtime)(xe);
	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GSC);
	if (!fw_ref.domains) {
		drm_dbg_kms(&xe->drm,
			    "failed to get forcewake to check proxy status\n");
		return false;
	}

	return xe_gsc_proxy_init_done(gsc);
}
58 
59 /*This function helps allocate memory for the command that we will send to gsc cs */
intel_hdcp_gsc_initialize_message(struct xe_device * xe,struct intel_hdcp_gsc_context * gsc_context)60 static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
61 					     struct intel_hdcp_gsc_context *gsc_context)
62 {
63 	struct xe_bo *bo = NULL;
64 	u64 cmd_in, cmd_out;
65 	int ret = 0;
66 
67 	/* allocate object of two page for HDCP command memory and store it */
68 	bo = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), PAGE_SIZE * 2,
69 				       ttm_bo_type_kernel,
70 				       XE_BO_FLAG_SYSTEM |
71 				       XE_BO_FLAG_GGTT, false);
72 
73 	if (IS_ERR(bo)) {
74 		drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
75 		ret = PTR_ERR(bo);
76 		goto out;
77 	}
78 
79 	cmd_in = xe_bo_ggtt_addr(bo);
80 	cmd_out = cmd_in + PAGE_SIZE;
81 	xe_map_memset(xe, &bo->vmap, 0, 0, xe_bo_size(bo));
82 
83 	gsc_context->hdcp_bo = bo;
84 	gsc_context->hdcp_cmd_in = cmd_in;
85 	gsc_context->hdcp_cmd_out = cmd_out;
86 	gsc_context->xe = xe;
87 
88 out:
89 	return ret;
90 }
91 
intel_hdcp_gsc_context_alloc(struct drm_device * drm)92 static struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm)
93 {
94 	struct xe_device *xe = to_xe_device(drm);
95 	struct intel_hdcp_gsc_context *gsc_context;
96 	int ret;
97 
98 	gsc_context = kzalloc_obj(*gsc_context);
99 	if (!gsc_context)
100 		return ERR_PTR(-ENOMEM);
101 
102 	/*
103 	 * NOTE: No need to lock the comp mutex here as it is already
104 	 * going to be taken before this function called
105 	 */
106 	ret = intel_hdcp_gsc_initialize_message(xe, gsc_context);
107 	if (ret) {
108 		drm_err(&xe->drm, "Could not initialize gsc_context\n");
109 		kfree(gsc_context);
110 		gsc_context = ERR_PTR(ret);
111 	}
112 
113 	return gsc_context;
114 }
115 
intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context * gsc_context)116 static void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context)
117 {
118 	if (!gsc_context)
119 		return;
120 
121 	xe_bo_unpin_map_no_vm(gsc_context->hdcp_bo);
122 	kfree(gsc_context);
123 }
124 
/*
 * Submit one HDCP command to the GSC and parse the reply header.
 * Returns 0 on success, -EAGAIN if the firmware still reports the request
 * as pending (caller should resubmit), or another negative errno.
 */
static int xe_gsc_send_sync(struct xe_device *xe,
			    struct intel_hdcp_gsc_context *gsc_context,
			    u32 msg_size_in, u32 msg_size_out,
			    u32 addr_out_off)
{
	struct iosys_map *map = &gsc_context->hdcp_bo->vmap;
	struct xe_gt *gt = gsc_context->hdcp_bo->tile->media_gt;
	int err;

	err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, gsc_context->hdcp_cmd_in,
				       msg_size_in, gsc_context->hdcp_cmd_out,
				       msg_size_out);
	if (err) {
		drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", err);
		return err;
	}

	/* Firmware hasn't finished with this message yet: ask for a retry. */
	if (xe_gsc_check_and_update_pending(xe, map, 0, map, addr_out_off))
		return -EAGAIN;

	return xe_gsc_read_out_header(xe, map, addr_out_off,
				      sizeof(struct hdcp_cmd_header), NULL);
}
150 
intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context * gsc_context,void * msg_in,size_t msg_in_len,void * msg_out,size_t msg_out_len)151 static ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
152 				       void *msg_in, size_t msg_in_len,
153 				       void *msg_out, size_t msg_out_len)
154 {
155 	struct xe_device *xe = gsc_context->xe;
156 	const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
157 	u64 host_session_id;
158 	u32 msg_size_in, msg_size_out;
159 	u32 addr_out_off, addr_in_wr_off = 0;
160 	int ret, tries = 0;
161 
162 	if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
163 		return -ENOSPC;
164 
165 	msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
166 	msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
167 	addr_out_off = PAGE_SIZE;
168 
169 	host_session_id = xe_gsc_create_host_session_id();
170 	guard(xe_pm_runtime_noresume)(xe);
171 	addr_in_wr_off = xe_gsc_emit_header(xe, &gsc_context->hdcp_bo->vmap,
172 					    addr_in_wr_off, HECI_MEADDRESS_HDCP,
173 					    host_session_id, msg_in_len);
174 	xe_map_memcpy_to(xe, &gsc_context->hdcp_bo->vmap, addr_in_wr_off,
175 			 msg_in, msg_in_len);
176 	/*
177 	 * Keep sending request in case the pending bit is set no need to add
178 	 * message handle as we are using same address hence loc. of header is
179 	 * same and it will contain the message handle. we will send the message
180 	 * 20 times each message 50 ms apart
181 	 */
182 	do {
183 		ret = xe_gsc_send_sync(xe, gsc_context, msg_size_in, msg_size_out,
184 				       addr_out_off);
185 
186 		/* Only try again if gsc says so */
187 		if (ret != -EAGAIN)
188 			break;
189 
190 		msleep(50);
191 
192 	} while (++tries < 20);
193 
194 	if (ret)
195 		return ret;
196 
197 	xe_map_memcpy_from(xe, msg_out, &gsc_context->hdcp_bo->vmap,
198 			   addr_out_off + HDCP_GSC_HEADER_SIZE,
199 			   msg_out_len);
200 
201 	return ret;
202 }
203 
/* HDCP GSC hooks handed to the shared Intel display code. */
const struct intel_display_hdcp_interface xe_display_hdcp_interface = {
	.gsc_msg_send = intel_hdcp_gsc_msg_send,
	.gsc_check_status = intel_hdcp_gsc_check_status,
	.gsc_context_alloc = intel_hdcp_gsc_context_alloc,
	.gsc_context_free = intel_hdcp_gsc_context_free,
};
210