// SPDX-License-Identifier: MIT
/*
 * Copyright 2023, Intel Corporation.
 */

#include <drm/drm_print.h>
#include <drm/intel/i915_hdcp_interface.h>
#include <linux/delay.h>

#include "abi/gsc_command_header_abi.h"
#include "intel_hdcp_gsc.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_force_wake.h"
#include "xe_gsc_proxy.h"
#include "xe_gsc_submit.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_uc_fw.h"

#define HECI_MEADDRESS_HDCP 18

/* Per-device state used to send HDCP commands to the GSC firmware */
struct intel_hdcp_gsc_context {
	struct xe_device *xe;
	/* pinned, GGTT-mapped two-page buffer backing command and reply */
	struct xe_bo *hdcp_bo;
	/* GGTT address of the command (input) page */
	u64 hdcp_cmd_in;
	/* GGTT address of the reply (output) page */
	u64 hdcp_cmd_out;
};

#define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)

bool intel_hdcp_gsc_check_status(struct drm_device *drm)
{
	struct xe_device *xe = to_xe_device(drm);
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	struct xe_gt *gt = tile->media_gt;
	struct xe_gsc *gsc = &gt->uc.gsc;
	bool ret = true;
	unsigned int fw_ref;

	if (!gsc || !xe_uc_fw_is_enabled(&gsc->fw)) {
		drm_dbg_kms(&xe->drm,
			    "GSC Components not ready for HDCP2.x\n");
		return false;
	}

	xe_pm_runtime_get(xe);
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
	if (!fw_ref) {
		drm_dbg_kms(&xe->drm,
			    "failed to get forcewake to check proxy status\n");
		ret = false;
		goto out;
	}

	if (!xe_gsc_proxy_init_done(gsc))
		ret = false;

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
out:
	xe_pm_runtime_put(xe);
	return ret;
}

/* This function allocates memory for the command that we will send to the GSC CS */
static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
					     struct intel_hdcp_gsc_context *gsc_context)
{
	struct xe_bo *bo = NULL;
	u64 cmd_in, cmd_out;
	int ret = 0;

	/* allocate a two-page object for HDCP command memory and store it */
	bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM |
				  XE_BO_FLAG_GGTT);

	if (IS_ERR(bo)) {
		drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
		ret = PTR_ERR(bo);
		goto out;
	}

	cmd_in = xe_bo_ggtt_addr(bo);
	cmd_out = cmd_in + PAGE_SIZE;
	xe_map_memset(xe, &bo->vmap, 0, 0, xe_bo_size(bo));

	gsc_context->hdcp_bo = bo;
	gsc_context->hdcp_cmd_in = cmd_in;
	gsc_context->hdcp_cmd_out = cmd_out;
	gsc_context->xe = xe;

out:
	return ret;
}

struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm)
{
	struct xe_device *xe = to_xe_device(drm);
	struct intel_hdcp_gsc_context *gsc_context;
	int ret;

	gsc_context = kzalloc(sizeof(*gsc_context), GFP_KERNEL);
	if (!gsc_context)
		return ERR_PTR(-ENOMEM);

	/*
	 * NOTE: No need to lock the comp mutex here as it is already
	 * going to be taken before this function is called
	 */
	ret = intel_hdcp_gsc_initialize_message(xe, gsc_context);
	if (ret) {
		drm_err(&xe->drm, "Could not initialize gsc_context\n");
		kfree(gsc_context);
		gsc_context = ERR_PTR(ret);
	}

	return gsc_context;
}

void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context)
{
	if (!gsc_context)
		return;

	xe_bo_unpin_map_no_vm(gsc_context->hdcp_bo);
	kfree(gsc_context);
}
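
/*
 * Command buffer layout used by the send path below, as set up by
 * intel_hdcp_gsc_initialize_message():
 *
 *   hdcp_cmd_in  (page 0): struct intel_gsc_mtl_header | HDCP request payload
 *   hdcp_cmd_out (page 1): struct intel_gsc_mtl_header | HDCP reply payload
 *
 * Both payloads start HDCP_GSC_HEADER_SIZE bytes into their page, and the
 * reply page sits one PAGE_SIZE after the request page, which is why
 * intel_hdcp_gsc_msg_send() passes addr_out_off == PAGE_SIZE.
 */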
static int xe_gsc_send_sync(struct xe_device *xe,
			    struct intel_hdcp_gsc_context *gsc_context,
			    u32 msg_size_in, u32 msg_size_out,
			    u32 addr_out_off)
{
	struct xe_gt *gt = gsc_context->hdcp_bo->tile->media_gt;
	struct iosys_map *map = &gsc_context->hdcp_bo->vmap;
	struct xe_gsc *gsc = &gt->uc.gsc;
	int ret;

	ret = xe_gsc_pkt_submit_kernel(gsc, gsc_context->hdcp_cmd_in, msg_size_in,
				       gsc_context->hdcp_cmd_out, msg_size_out);
	if (ret) {
		drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", ret);
		return ret;
	}

	if (xe_gsc_check_and_update_pending(xe, map, 0, map, addr_out_off))
		return -EAGAIN;

	ret = xe_gsc_read_out_header(xe, map, addr_out_off,
				     sizeof(struct hdcp_cmd_header), NULL);

	return ret;
}

ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
				void *msg_in, size_t msg_in_len,
				void *msg_out, size_t msg_out_len)
{
	struct xe_device *xe = gsc_context->xe;
	const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
	u64 host_session_id;
	u32 msg_size_in, msg_size_out;
	u32 addr_out_off, addr_in_wr_off = 0;
	int ret, tries = 0;

	/* no runtime PM reference taken yet, so return directly */
	if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
		return -ENOSPC;

	msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
	msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
	addr_out_off = PAGE_SIZE;

	host_session_id = xe_gsc_create_host_session_id();
	xe_pm_runtime_get_noresume(xe);
	addr_in_wr_off = xe_gsc_emit_header(xe, &gsc_context->hdcp_bo->vmap,
					    addr_in_wr_off, HECI_MEADDRESS_HDCP,
					    host_session_id, msg_in_len);
	xe_map_memcpy_to(xe, &gsc_context->hdcp_bo->vmap, addr_in_wr_off,
			 msg_in, msg_in_len);
	/*
	 * Keep resending the request while the GSC reports the pending bit.
	 * There is no need to add a message handle: we reuse the same address,
	 * so the header location is unchanged and already contains the handle.
	 * Send the message up to 20 times, 50 ms apart.
	 */
	do {
		ret = xe_gsc_send_sync(xe, gsc_context, msg_size_in, msg_size_out,
				       addr_out_off);

		/* Only try again if gsc says so */
		if (ret != -EAGAIN)
			break;

		msleep(50);

	} while (++tries < 20);

	if (ret)
		goto out;

	xe_map_memcpy_from(xe, msg_out, &gsc_context->hdcp_bo->vmap,
			   addr_out_off + HDCP_GSC_HEADER_SIZE,
			   msg_out_len);

out:
	xe_pm_runtime_put(xe);
	return ret;
}
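
/*
 * Illustrative sketch of how a caller might drive this interface, assuming it
 * holds a struct drm_device pointer and pre-sized in/out buffers (hypothetical
 * usage, abbreviated error handling; not part of this file's build):
 *
 *	struct intel_hdcp_gsc_context *gsc_context;
 *	ssize_t ret;
 *
 *	if (!intel_hdcp_gsc_check_status(drm))
 *		return -ENODEV;
 *
 *	gsc_context = intel_hdcp_gsc_context_alloc(drm);
 *	if (IS_ERR(gsc_context))
 *		return PTR_ERR(gsc_context);
 *
 *	ret = intel_hdcp_gsc_msg_send(gsc_context, msg_in, msg_in_len,
 *				      msg_out, msg_out_len);
 *	if (ret < 0)
 *		drm_dbg_kms(drm, "HDCP GSC message failed\n");
 *
 *	intel_hdcp_gsc_context_free(gsc_context);
 */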