// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/pm_runtime.h>

#include "iris_core.h"
#include "iris_hfi_queue.h"
#include "iris_vpu_common.h"

static int iris_hfi_queue_write(struct iris_iface_q_info *qinfo, void *packet, u32 packet_size)
{
	struct iris_hfi_queue_header *queue = qinfo->qhdr;
	u32 write_idx = queue->write_idx * sizeof(u32);
	u32 read_idx = queue->read_idx * sizeof(u32);
	u32 empty_space, new_write_idx, residue;
	u32 *write_ptr;

	if (write_idx < read_idx)
		empty_space = read_idx - write_idx;
	else
		empty_space = IFACEQ_QUEUE_SIZE - (write_idx - read_idx);
	if (empty_space < packet_size)
		return -ENOSPC;

	queue->tx_req = 0;

	new_write_idx = write_idx + packet_size;
	write_ptr = (u32 *)((u8 *)qinfo->kernel_vaddr + write_idx);

	if (write_ptr < (u32 *)qinfo->kernel_vaddr ||
	    write_ptr > (u32 *)(qinfo->kernel_vaddr + IFACEQ_QUEUE_SIZE))
		return -EINVAL;

	if (new_write_idx < IFACEQ_QUEUE_SIZE) {
		memcpy(write_ptr, packet, packet_size);
	} else {
		/* Wrap around: split the copy at the end of the queue */
		residue = new_write_idx - IFACEQ_QUEUE_SIZE;
		memcpy(write_ptr, packet, packet_size - residue);
		memcpy(qinfo->kernel_vaddr, packet + (packet_size - residue), residue);
		new_write_idx = residue;
	}

	/* Make sure the packet is written before updating the write index */
	mb();
	queue->write_idx = new_write_idx / sizeof(u32);

	/* Make sure the write index is updated before an interrupt is raised */
	mb();

	return 0;
}

static int iris_hfi_queue_read(struct iris_iface_q_info *qinfo, void *packet)
{
	struct iris_hfi_queue_header *queue = qinfo->qhdr;
	u32 write_idx = queue->write_idx * sizeof(u32);
	u32 read_idx = queue->read_idx * sizeof(u32);
	u32 packet_size, receive_request = 0;
	u32 new_read_idx, residue;
	u32 *read_ptr;
	int ret = 0;

	if (queue->queue_type == IFACEQ_MSGQ_ID)
		receive_request = 1;

	if (read_idx == write_idx) {
		queue->rx_req = receive_request;
		/* Ensure the queue header is updated in main memory */
		mb();
		return -ENODATA;
	}

	read_ptr = qinfo->kernel_vaddr + read_idx;
	if (read_ptr < (u32 *)qinfo->kernel_vaddr ||
	    read_ptr > (u32 *)(qinfo->kernel_vaddr + IFACEQ_QUEUE_SIZE - sizeof(*read_ptr)))
		return -ENODATA;

	packet_size = *read_ptr;
	if (!packet_size)
		return -EINVAL;

	new_read_idx = read_idx + packet_size;
	if (packet_size <= IFACEQ_CORE_PKT_SIZE) {
		if (new_read_idx < IFACEQ_QUEUE_SIZE) {
			memcpy(packet, read_ptr, packet_size);
		} else {
			/* Wrap around: split the copy at the end of the queue */
			residue = new_read_idx - IFACEQ_QUEUE_SIZE;
			memcpy(packet, read_ptr, packet_size - residue);
			memcpy(packet + (packet_size - residue), qinfo->kernel_vaddr, residue);
			new_read_idx = residue;
		}
	} else {
		/* Oversized packet: skip ahead to the write index and report it */
		new_read_idx = write_idx;
		ret = -EBADMSG;
	}

	queue->rx_req = receive_request;

	queue->read_idx = new_read_idx / sizeof(u32);
	/* Ensure the queue header is updated in main memory */
	mb();

	return ret;
}

int iris_hfi_queue_cmd_write_locked(struct iris_core *core, void *pkt, u32 pkt_size)
{
	struct iris_iface_q_info *q_info = &core->command_queue;

	if (core->state == IRIS_CORE_ERROR || core->state == IRIS_CORE_DEINIT)
		return -EINVAL;

	if (!iris_hfi_queue_write(q_info, pkt, pkt_size)) {
		iris_vpu_raise_interrupt(core);
	} else {
		dev_err(core->dev, "queue full\n");
		return -ENODATA;
	}

	return 0;
}
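
/*
 * iris_hfi_queue_cmd_write() - runtime-PM aware wrapper around the locked
 * command-queue write: resume the device, write the packet under core->lock
 * (raising an interrupt to the firmware on success) and then mark the device
 * as ready to autosuspend again.
 */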
int iris_hfi_queue_cmd_write(struct iris_core *core, void *pkt, u32 pkt_size)
{
	int ret;

	/* pm_runtime_resume_and_get() already drops its reference on failure */
	ret = pm_runtime_resume_and_get(core->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&core->lock);
	ret = iris_hfi_queue_cmd_write_locked(core, pkt, pkt_size);
	if (ret) {
		mutex_unlock(&core->lock);
		goto exit;
	}
	mutex_unlock(&core->lock);

	pm_runtime_mark_last_busy(core->dev);
	pm_runtime_put_autosuspend(core->dev);

	return 0;

exit:
	pm_runtime_put_sync(core->dev);

	return ret;
}

int iris_hfi_queue_msg_read(struct iris_core *core, void *pkt)
{
	struct iris_iface_q_info *q_info = &core->message_queue;
	int ret = 0;

	mutex_lock(&core->lock);
	if (core->state != IRIS_CORE_INIT) {
		ret = -EINVAL;
		goto unlock;
	}

	if (iris_hfi_queue_read(q_info, pkt)) {
		ret = -ENODATA;
		goto unlock;
	}

unlock:
	mutex_unlock(&core->lock);

	return ret;
}

int iris_hfi_queue_dbg_read(struct iris_core *core, void *pkt)
{
	struct iris_iface_q_info *q_info = &core->debug_queue;
	int ret = 0;

	mutex_lock(&core->lock);
	if (core->state != IRIS_CORE_INIT) {
		ret = -EINVAL;
		goto unlock;
	}

	if (iris_hfi_queue_read(q_info, pkt)) {
		ret = -ENODATA;
		goto unlock;
	}

unlock:
	mutex_unlock(&core->lock);

	return ret;
}

static void iris_hfi_queue_set_header(struct iris_core *core, u32 queue_id,
				      struct iris_iface_q_info *iface_q)
{
	iface_q->qhdr->status = 0x1;
	iface_q->qhdr->start_addr = iface_q->device_addr;
	iface_q->qhdr->header_type = IFACEQ_DFLT_QHDR;
	iface_q->qhdr->queue_type = queue_id;
	iface_q->qhdr->q_size = IFACEQ_QUEUE_SIZE / sizeof(u32);
	iface_q->qhdr->pkt_size = 0; /* variable packet size */
	iface_q->qhdr->rx_wm = 0x1;
	iface_q->qhdr->tx_wm = 0x1;
	iface_q->qhdr->rx_req = 0x1;
	iface_q->qhdr->tx_req = 0x0;
	iface_q->qhdr->rx_irq_status = 0x0;
	iface_q->qhdr->tx_irq_status = 0x0;
	iface_q->qhdr->read_idx = 0x0;
	iface_q->qhdr->write_idx = 0x0;

	/*
	 * Set the receive request to zero on the debug queue as there is no
	 * need for an interrupt from the video hardware for debug messages.
	 */
	if (queue_id == IFACEQ_DBGQ_ID)
		iface_q->qhdr->rx_req = 0;
}

static void
iris_hfi_queue_init(struct iris_core *core, u32 queue_id, struct iris_iface_q_info *iface_q)
{
	struct iris_hfi_queue_table_header *q_tbl_hdr = core->iface_q_table_vaddr;
	u32 offset = sizeof(*q_tbl_hdr) + (queue_id * IFACEQ_QUEUE_SIZE);

	iface_q->device_addr = core->iface_q_table_daddr + offset;
	iface_q->kernel_vaddr = (void *)((char *)core->iface_q_table_vaddr + offset);
	iface_q->qhdr = &q_tbl_hdr->q_hdr[queue_id];

	iris_hfi_queue_set_header(core, queue_id, iface_q);
}

static void iris_hfi_queue_deinit(struct iris_iface_q_info *iface_q)
{
	iface_q->qhdr = NULL;
	iface_q->kernel_vaddr = NULL;
	iface_q->device_addr = 0;
}

int iris_hfi_queues_init(struct iris_core *core)
{
	struct iris_hfi_queue_table_header *q_tbl_hdr;
	u32 queue_size;

	/* Iris hardware requires 4K queue alignment */
	queue_size = ALIGN(sizeof(*q_tbl_hdr) + (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ), SZ_4K);
	core->iface_q_table_vaddr = dma_alloc_attrs(core->dev, queue_size,
						    &core->iface_q_table_daddr,
						    GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
	if (!core->iface_q_table_vaddr) {
dev_err(core->dev, "queues alloc and map failed\n"); 259 return -ENOMEM; 260 } 261 262 core->sfr_vaddr = dma_alloc_attrs(core->dev, SFR_SIZE, 263 &core->sfr_daddr, 264 GFP_KERNEL, DMA_ATTR_WRITE_COMBINE); 265 if (!core->sfr_vaddr) { 266 dev_err(core->dev, "sfr alloc and map failed\n"); 267 dma_free_attrs(core->dev, sizeof(*q_tbl_hdr), core->iface_q_table_vaddr, 268 core->iface_q_table_daddr, DMA_ATTR_WRITE_COMBINE); 269 return -ENOMEM; 270 } 271 272 iris_hfi_queue_init(core, IFACEQ_CMDQ_ID, &core->command_queue); 273 iris_hfi_queue_init(core, IFACEQ_MSGQ_ID, &core->message_queue); 274 iris_hfi_queue_init(core, IFACEQ_DBGQ_ID, &core->debug_queue); 275 276 q_tbl_hdr = (struct iris_hfi_queue_table_header *)core->iface_q_table_vaddr; 277 q_tbl_hdr->version = 0; 278 q_tbl_hdr->device_addr = (void *)core; 279 strscpy(q_tbl_hdr->name, "iris-hfi-queues", sizeof(q_tbl_hdr->name)); 280 q_tbl_hdr->size = sizeof(*q_tbl_hdr); 281 q_tbl_hdr->qhdr0_offset = sizeof(*q_tbl_hdr) - 282 (IFACEQ_NUMQ * sizeof(struct iris_hfi_queue_header)); 283 q_tbl_hdr->qhdr_size = sizeof(q_tbl_hdr->q_hdr[0]); 284 q_tbl_hdr->num_q = IFACEQ_NUMQ; 285 q_tbl_hdr->num_active_q = IFACEQ_NUMQ; 286 287 /* Write sfr size in first word to be used by firmware */ 288 *((u32 *)core->sfr_vaddr) = SFR_SIZE; 289 290 return 0; 291 } 292 293 void iris_hfi_queues_deinit(struct iris_core *core) 294 { 295 u32 queue_size; 296 297 if (!core->iface_q_table_vaddr) 298 return; 299 300 iris_hfi_queue_deinit(&core->debug_queue); 301 iris_hfi_queue_deinit(&core->message_queue); 302 iris_hfi_queue_deinit(&core->command_queue); 303 304 dma_free_attrs(core->dev, SFR_SIZE, core->sfr_vaddr, 305 core->sfr_daddr, DMA_ATTR_WRITE_COMBINE); 306 307 core->sfr_vaddr = NULL; 308 core->sfr_daddr = 0; 309 310 queue_size = ALIGN(sizeof(struct iris_hfi_queue_table_header) + 311 (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ), SZ_4K); 312 313 dma_free_attrs(core->dev, queue_size, core->iface_q_table_vaddr, 314 core->iface_q_table_daddr, DMA_ATTR_WRITE_COMBINE); 315 316 core->iface_q_table_vaddr = NULL; 317 core->iface_q_table_daddr = 0; 318 } 319