// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gsc_submit.h"

#include <linux/poison.h>

#include "abi/gsc_command_header_abi.h"
#include "xe_bb.h"
#include "xe_exec_queue.h"
#include "xe_gt_printk.h"
#include "xe_gt_types.h"
#include "xe_map.h"
#include "xe_sched_job.h"
#include "instructions/xe_gsc_commands.h"
#include "regs/xe_gsc_regs.h"

#define GSC_HDR_SIZE (sizeof(struct intel_gsc_mtl_header)) /* shorthand define */

#define mtl_gsc_header_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct intel_gsc_mtl_header, field_, val_)

#define mtl_gsc_header_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct intel_gsc_mtl_header, field_)

/*
 * The GSC FW allows us to define the host_session_handle as we see fit, as
 * long as we use a unique identifier for each user, with handle 0 being
 * reserved for kernel usage.
 * To be able to differentiate which client subsystem owns a given session, we
 * include the client id in the top 8 bits of the handle.
 */
#define HOST_SESSION_CLIENT_MASK GENMASK_ULL(63, 56)

static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
{
	return container_of(gsc, struct xe_gt, uc.gsc);
}

/**
 * xe_gsc_emit_header - write the MTL GSC header in memory
 * @xe: the Xe device
 * @map: the iosys map to write to
 * @offset: offset from the start of the map at which to write the header
 * @heci_client_id: client id identifying the type of command (see abi for values)
 * @host_session_id: host session ID of the caller
 * @payload_size: size of the payload that follows the header
 *
 * Returns: offset of the memory location following the header
 */
u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset,
		       u8 heci_client_id, u64 host_session_id, u32 payload_size)
{
	xe_assert(xe, !(host_session_id & HOST_SESSION_CLIENT_MASK));

	if (host_session_id)
		host_session_id |= FIELD_PREP(HOST_SESSION_CLIENT_MASK, heci_client_id);

	xe_map_memset(xe, map, offset, 0, GSC_HDR_SIZE);

	mtl_gsc_header_wr(xe, map, offset, validity_marker, GSC_HECI_VALIDITY_MARKER);
	mtl_gsc_header_wr(xe, map, offset, heci_client_id, heci_client_id);
	mtl_gsc_header_wr(xe, map, offset, host_session_handle, host_session_id);
	mtl_gsc_header_wr(xe, map, offset, header_version, MTL_GSC_HEADER_VERSION);
	mtl_gsc_header_wr(xe, map, offset, message_size, payload_size + GSC_HDR_SIZE);

	return offset + GSC_HDR_SIZE;
}

/**
 * xe_gsc_poison_header - poison the MTL GSC header in memory
 * @xe: the Xe device
 * @map: the iosys map to write to
 * @offset: offset from the start of the map at which the header resides
 */
void xe_gsc_poison_header(struct xe_device *xe, struct iosys_map *map, u32 offset)
{
	xe_map_memset(xe, map, offset, POISON_FREE, GSC_HDR_SIZE);
}
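/*
 * Illustrative sketch (not compiled) of how a caller is expected to use
 * xe_gsc_emit_header() to build a message: the payload is written right after
 * the header, at the offset the helper returns. MY_CLIENT_ID,
 * my_build_payload() and session_id are hypothetical placeholders for
 * whatever the calling subsystem provides; real client id values come from
 * the GSC ABI headers.
 *
 *	u32 wr_offset;
 *
 *	wr_offset = xe_gsc_emit_header(xe, &msg, 0, MY_CLIENT_ID,
 *				       session_id, PAYLOAD_SIZE);
 *	my_build_payload(xe, &msg, wr_offset);
 *
 * The client id lands in bits 63:56 of host_session_handle (see
 * HOST_SESSION_CLIENT_MASK above), so session_id must not use those bits and
 * must be unique within the client; handle 0 is reserved for kernel usage.
 */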
/**
 * xe_gsc_check_and_update_pending - check the pending bit and update the input
 * header with the retry handle from the output header
 * @xe: the Xe device
 * @in: the iosys map containing the input buffer
 * @offset_in: offset within the iosys map at which the input buffer is located
 * @out: the iosys map containing the output buffer
 * @offset_out: offset within the iosys map at which the output buffer is located
 *
 * Returns: true if the pending bit was set, false otherwise
 */
bool xe_gsc_check_and_update_pending(struct xe_device *xe,
				     struct iosys_map *in, u32 offset_in,
				     struct iosys_map *out, u32 offset_out)
{
	if (mtl_gsc_header_rd(xe, out, offset_out, flags) & GSC_OUTFLAG_MSG_PENDING) {
		u64 handle = mtl_gsc_header_rd(xe, out, offset_out, gsc_message_handle);

		mtl_gsc_header_wr(xe, in, offset_in, gsc_message_handle, handle);

		return true;
	}

	return false;
}

/**
 * xe_gsc_read_out_header - reads and validates the output header and returns
 * the offset of the reply following the header
 * @xe: the Xe device
 * @map: the iosys map containing the output buffer
 * @offset: offset within the iosys map at which the output buffer is located
 * @min_payload_size: minimum size of the message excluding the GSC header
 * @payload_offset: optional pointer to be set to the payload offset
 *
 * Returns: 0 on success, a negative errno value on failure
 */
int xe_gsc_read_out_header(struct xe_device *xe,
			   struct iosys_map *map, u32 offset,
			   u32 min_payload_size,
			   u32 *payload_offset)
{
	u32 marker = mtl_gsc_header_rd(xe, map, offset, validity_marker);
	u32 size = mtl_gsc_header_rd(xe, map, offset, message_size);
	u32 status = mtl_gsc_header_rd(xe, map, offset, status);
	u32 payload_size = size - GSC_HDR_SIZE;

	if (marker != GSC_HECI_VALIDITY_MARKER)
		return -EPROTO;

	if (status != 0) {
		drm_err(&xe->drm, "GSC header readout indicates error: %d\n",
			status);
		return -EINVAL;
	}

	/* payload_size underflows when size < GSC_HDR_SIZE; the first check catches that */
	if (size < GSC_HDR_SIZE || payload_size < min_payload_size)
		return -ENODATA;

	if (payload_offset)
		*payload_offset = offset + GSC_HDR_SIZE;

	return 0;
}

/**
 * xe_gsc_pkt_submit_kernel - submit a kernel heci pkt to the GSC
 * @gsc: the GSC uC
 * @addr_in: GGTT address of the message to send to the GSC
 * @size_in: size of the message to send to the GSC
 * @addr_out: GGTT address for the GSC to write the reply to
 * @size_out: size of the memory reserved for the reply
 *
 * Returns: 0 on success, a negative errno value on failure
 */
int xe_gsc_pkt_submit_kernel(struct xe_gsc *gsc, u64 addr_in, u32 size_in,
			     u64 addr_out, u32 size_out)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_bb *bb;
	struct xe_sched_job *job;
	struct dma_fence *fence;
	long timeout;

	if (size_in < GSC_HDR_SIZE)
		return -ENODATA;

	if (size_out < GSC_HDR_SIZE)
		return -ENOMEM;

	bb = xe_bb_new(gt, 8, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	/* GSC_HECI_CMD_PKT: in addr (lo/hi) and size, out addr (lo/hi) and size, plus a zeroed dword */
	bb->cs[bb->len++] = GSC_HECI_CMD_PKT;
	bb->cs[bb->len++] = lower_32_bits(addr_in);
	bb->cs[bb->len++] = upper_32_bits(addr_in);
	bb->cs[bb->len++] = size_in;
	bb->cs[bb->len++] = lower_32_bits(addr_out);
	bb->cs[bb->len++] = upper_32_bits(addr_out);
	bb->cs[bb->len++] = size_out;
	bb->cs[bb->len++] = 0;

	job = xe_bb_create_job(gsc->q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}
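/*
 * Illustrative sketch (not compiled) of the full kernel submission flow built
 * from the helpers above. gsc, xe, the msg_in/msg_out maps, their GGTT
 * addresses ggtt_in/ggtt_out, rd_offset and the MY_CLIENT_ID/PAYLOAD_SIZE/
 * OUT_SIZE/MIN_REPLY_SIZE names are hypothetical placeholders supplied by the
 * caller:
 *
 *	xe_gsc_emit_header(xe, &msg_in, 0, MY_CLIENT_ID, 0, PAYLOAD_SIZE);
 *	(write PAYLOAD_SIZE bytes of payload at offset GSC_HDR_SIZE)
 *
 *	ret = xe_gsc_pkt_submit_kernel(gsc, ggtt_in,
 *				       GSC_HDR_SIZE + PAYLOAD_SIZE,
 *				       ggtt_out, OUT_SIZE);
 *	if (ret)
 *		return ret;
 *
 *	The GSC can ask for a retry; if so, the input header has been updated
 *	with the retry handle and the same buffer can be resubmitted:
 *
 *	if (xe_gsc_check_and_update_pending(xe, &msg_in, 0, &msg_out, 0))
 *		return -EAGAIN;
 *
 *	ret = xe_gsc_read_out_header(xe, &msg_out, 0, MIN_REPLY_SIZE,
 *				     &rd_offset);
 *	if (ret)
 *		return ret;
 *	(the reply payload is now readable at rd_offset within msg_out)
 */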