// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2023 Collabora ltd. */

#ifdef CONFIG_ARM_ARCH_TIMER
#include <asm/arch_timer.h>
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/iopoll.h>
#include <linux/iosys-map.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_gpu.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

#define CSF_FW_NAME "mali_csffw.bin"

#define PING_INTERVAL_MS		12000
#define PROGRESS_TIMEOUT_CYCLES		(5ull * 500 * 1024 * 1024)
#define PROGRESS_TIMEOUT_SCALE_SHIFT	10
#define IDLE_HYSTERESIS_US		800
#define PWROFF_HYSTERESIS_US		10000
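
/*
 * PING_INTERVAL_MS drives the FW watchdog (see panthor_fw_ping_work()).
 * PROGRESS_TIMEOUT_CYCLES is shifted right by PROGRESS_TIMEOUT_SCALE_SHIFT
 * before being written to the global interface progress_timer field, while
 * the idle/poweroff hysteresis values are converted to timer cycles with
 * panthor_fw_conv_timeout().
 */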

/**
 * struct panthor_fw_binary_hdr - Firmware binary header.
 */
struct panthor_fw_binary_hdr {
	/** @magic: Magic value to check binary validity. */
	u32 magic;
#define CSF_FW_BINARY_HEADER_MAGIC	0xc3f13a6e

	/** @minor: Minor FW version. */
	u8 minor;

	/** @major: Major FW version. */
	u8 major;
#define CSF_FW_BINARY_HEADER_MAJOR_MAX	0

	/** @padding1: MBZ. */
	u16 padding1;

	/** @version_hash: FW version hash. */
	u32 version_hash;

	/** @padding2: MBZ. */
	u32 padding2;

	/** @size: FW binary size. */
	u32 size;
};

/**
 * enum panthor_fw_binary_entry_type - Firmware binary entry type
 */
enum panthor_fw_binary_entry_type {
	/** @CSF_FW_BINARY_ENTRY_TYPE_IFACE: Host <-> FW interface. */
	CSF_FW_BINARY_ENTRY_TYPE_IFACE = 0,

	/** @CSF_FW_BINARY_ENTRY_TYPE_CONFIG: FW config. */
	CSF_FW_BINARY_ENTRY_TYPE_CONFIG = 1,

	/** @CSF_FW_BINARY_ENTRY_TYPE_FUTF_TEST: Unit-tests. */
	CSF_FW_BINARY_ENTRY_TYPE_FUTF_TEST = 2,

	/** @CSF_FW_BINARY_ENTRY_TYPE_TRACE_BUFFER: Trace buffer interface. */
	CSF_FW_BINARY_ENTRY_TYPE_TRACE_BUFFER = 3,

	/** @CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA: Timeline metadata interface. */
	CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA = 4,

	/**
	 * @CSF_FW_BINARY_ENTRY_TYPE_BUILD_INFO_METADATA: Metadata about how
	 * the FW binary was built.
	 */
	CSF_FW_BINARY_ENTRY_TYPE_BUILD_INFO_METADATA = 6
};

#define CSF_FW_BINARY_ENTRY_TYPE(ehdr)	((ehdr) & 0xff)
#define CSF_FW_BINARY_ENTRY_SIZE(ehdr)	(((ehdr) >> 8) & 0xff)
#define CSF_FW_BINARY_ENTRY_UPDATE	BIT(30)
#define CSF_FW_BINARY_ENTRY_OPTIONAL	BIT(31)
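
/*
 * Layout of the 32-bit entry header: entry type in bits 7:0, entry size in
 * bits 15:8 (in bytes, header included, see panthor_fw_load_entry()), and the
 * UPDATE/OPTIONAL flags in bits 30 and 31.
 */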

#define CSF_FW_BINARY_IFACE_ENTRY_RD		BIT(0)
#define CSF_FW_BINARY_IFACE_ENTRY_WR		BIT(1)
#define CSF_FW_BINARY_IFACE_ENTRY_EX		BIT(2)
#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_NONE		(0 << 3)
#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED		(1 << 3)
#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_UNCACHED_COHERENT	(2 << 3)
#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED_COHERENT	(3 << 3)
#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK		GENMASK(4, 3)
#define CSF_FW_BINARY_IFACE_ENTRY_PROT		BIT(5)
#define CSF_FW_BINARY_IFACE_ENTRY_SHARED	BIT(30)
#define CSF_FW_BINARY_IFACE_ENTRY_ZERO		BIT(31)

#define CSF_FW_BINARY_IFACE_ENTRY_SUPPORTED_FLAGS	\
	(CSF_FW_BINARY_IFACE_ENTRY_RD |			\
	 CSF_FW_BINARY_IFACE_ENTRY_WR |			\
	 CSF_FW_BINARY_IFACE_ENTRY_EX |			\
	 CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK |	\
	 CSF_FW_BINARY_IFACE_ENTRY_PROT |		\
	 CSF_FW_BINARY_IFACE_ENTRY_SHARED |		\
	 CSF_FW_BINARY_IFACE_ENTRY_ZERO)

/**
 * struct panthor_fw_binary_section_entry_hdr - Describes a section of FW binary
 */
struct panthor_fw_binary_section_entry_hdr {
	/** @flags: Section flags. */
	u32 flags;

	/** @va: MCU virtual range to map this binary section to. */
	struct {
		/** @start: Start address. */
		u32 start;

		/** @end: End address. */
		u32 end;
	} va;

	/** @data: Data to initialize the FW section with. */
	struct {
		/** @start: Start offset in the FW binary. */
		u32 start;

		/** @end: End offset in the FW binary. */
		u32 end;
	} data;
};

struct panthor_fw_build_info_hdr {
	/** @meta_start: Offset of the build info data in the FW binary */
	u32 meta_start;
	/** @meta_size: Size of the build info data in the FW binary */
	u32 meta_size;
};

/**
 * struct panthor_fw_binary_iter - Firmware binary iterator
 *
 * Used to parse a firmware binary.
 */
struct panthor_fw_binary_iter {
	/** @data: FW binary data. */
	const void *data;

	/** @size: FW binary size. */
	size_t size;

	/** @offset: Iterator offset. */
	size_t offset;
};

/**
 * struct panthor_fw_section - FW section
 */
struct panthor_fw_section {
	/** @node: Used to keep track of FW sections. */
	struct list_head node;

	/** @flags: Section flags, as encoded in the FW binary. */
	u32 flags;

	/** @mem: Section memory. */
	struct panthor_kernel_bo *mem;

	/**
	 * @name: Name of the section, as specified in the binary.
	 *
	 * Can be NULL.
	 */
	const char *name;

	/**
	 * @data: Initial data copied to the FW memory.
	 *
	 * We keep data around so we can reload sections after a reset.
	 */
	struct {
		/** @buf: Buffer used to store init data. */
		const void *buf;

		/** @size: Size of @buf in bytes. */
		size_t size;
	} data;
};

#define CSF_MCU_SHARED_REGION_START	0x04000000ULL
#define CSF_MCU_SHARED_REGION_SIZE	0x04000000ULL

#define MIN_CS_PER_CSG			8
#define MIN_CSGS			3
#define MAX_CSG_PRIO			0xf

#define CSF_IFACE_VERSION(major, minor, patch)	\
	(((major) << 24) | ((minor) << 16) | (patch))
#define CSF_IFACE_VERSION_MAJOR(v)	((v) >> 24)
#define CSF_IFACE_VERSION_MINOR(v)	(((v) >> 16) & 0xff)
#define CSF_IFACE_VERSION_PATCH(v)	((v) & 0xffff)

#define CSF_GROUP_CONTROL_OFFSET	0x1000
#define CSF_STREAM_CONTROL_OFFSET	0x40
#define CSF_UNPRESERVED_REG_COUNT	4
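
/*
 * Layout of the shared interface section: the global control interface sits
 * at the beginning of the section, CSG control interfaces start at
 * CSF_GROUP_CONTROL_OFFSET and are group_stride bytes apart, and the CS
 * control interfaces of a group start at CSF_STREAM_CONTROL_OFFSET within
 * the group control block, stream_stride bytes apart
 * (see panthor_init_csg_iface() and panthor_init_cs_iface()).
 */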

/**
 * struct panthor_fw_iface - FW interfaces
 */
struct panthor_fw_iface {
	/** @global: Global interface. */
	struct panthor_fw_global_iface global;

	/** @groups: Group slot interfaces. */
	struct panthor_fw_csg_iface groups[MAX_CSGS];

	/** @streams: Command stream slot interfaces. */
	struct panthor_fw_cs_iface streams[MAX_CSGS][MAX_CS_PER_CSG];
};

/**
 * struct panthor_fw - Firmware management
 */
struct panthor_fw {
	/** @vm: MCU VM. */
	struct panthor_vm *vm;

	/** @sections: List of FW sections. */
	struct list_head sections;

	/** @shared_section: The section containing the FW interfaces. */
	struct panthor_fw_section *shared_section;

	/** @iface: FW interfaces. */
	struct panthor_fw_iface iface;

	/** @watchdog: Collection of fields relating to the FW watchdog. */
	struct {
		/** @ping_work: Delayed work used to ping the FW. */
		struct delayed_work ping_work;
	} watchdog;

	/**
	 * @req_waitqueue: FW request waitqueue.
	 *
	 * Every time a request is sent to a command stream group or the global
	 * interface, the caller will first busy wait for the request to be
	 * acknowledged, and then fall back to a sleeping wait.
	 *
	 * This wait queue is here to support the sleeping wait flavor.
	 */
	wait_queue_head_t req_waitqueue;

	/** @booted: True if the FW is booted. */
	bool booted;

	/** @irq: Job irq data. */
	struct panthor_irq irq;
};

struct panthor_vm *panthor_fw_vm(struct panthor_device *ptdev)
{
	return ptdev->fw->vm;
}

/**
 * panthor_fw_get_glb_iface() - Get the global interface
 * @ptdev: Device.
 *
 * Return: The global interface.
 */
struct panthor_fw_global_iface *
panthor_fw_get_glb_iface(struct panthor_device *ptdev)
{
	return &ptdev->fw->iface.global;
}

/**
 * panthor_fw_get_csg_iface() - Get a command stream group slot interface
 * @ptdev: Device.
 * @csg_slot: Index of the command stream group slot.
 *
 * Return: The command stream group slot interface.
 */
struct panthor_fw_csg_iface *
panthor_fw_get_csg_iface(struct panthor_device *ptdev, u32 csg_slot)
{
	if (drm_WARN_ON(&ptdev->base, csg_slot >= MAX_CSGS))
		return NULL;

	return &ptdev->fw->iface.groups[csg_slot];
}

/**
 * panthor_fw_get_cs_iface() - Get a command stream slot interface
 * @ptdev: Device.
 * @csg_slot: Index of the command stream group slot.
 * @cs_slot: Index of the command stream slot.
 *
 * Return: The command stream slot interface.
 */
struct panthor_fw_cs_iface *
panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot)
{
	if (drm_WARN_ON(&ptdev->base, csg_slot >= MAX_CSGS || cs_slot >= MAX_CS_PER_CSG))
		return NULL;

	return &ptdev->fw->iface.streams[csg_slot][cs_slot];
}

/**
 * panthor_fw_conv_timeout() - Convert a timeout into a cycle-count
 * @ptdev: Device.
 * @timeout_us: Timeout expressed in micro-seconds.
 *
 * The FW has two timer sources: the GPU counter or arch-timer. We need
 * to express timeouts in terms of number of cycles and specify which
 * timer source should be used.
 *
 * Return: A value suitable for timeout fields in the global interface.
 */
static u32 panthor_fw_conv_timeout(struct panthor_device *ptdev, u32 timeout_us)
{
	bool use_cycle_counter = false;
	u32 timer_rate = 0;
	u64 mod_cycles;

#ifdef CONFIG_ARM_ARCH_TIMER
	timer_rate = arch_timer_get_cntfrq();
#endif

	if (!timer_rate) {
		use_cycle_counter = true;
		timer_rate = clk_get_rate(ptdev->clks.core);
	}

	if (drm_WARN_ON(&ptdev->base, !timer_rate)) {
		/* We couldn't get a valid clock rate, let's just pick the
		 * maximum value so the FW still handles the core
		 * power on/off requests.
		 */
		return GLB_TIMER_VAL(~0) |
		       GLB_TIMER_SOURCE_GPU_COUNTER;
	}

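	/*
	 * The conversion below assumes the FW timeout fields are expressed
	 * in units of 1024 timer cycles, hence the extra shift by 10 on top
	 * of the us -> cycles conversion.
	 */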
	mod_cycles = DIV_ROUND_UP_ULL((u64)timeout_us * timer_rate,
				      1000000ull << 10);
	if (drm_WARN_ON(&ptdev->base, mod_cycles > GLB_TIMER_VAL(~0)))
		mod_cycles = GLB_TIMER_VAL(~0);

	return GLB_TIMER_VAL(mod_cycles) |
	       (use_cycle_counter ? GLB_TIMER_SOURCE_GPU_COUNTER : 0);
}

static int panthor_fw_binary_iter_read(struct panthor_device *ptdev,
				       struct panthor_fw_binary_iter *iter,
				       void *out, size_t size)
{
	size_t new_offset = iter->offset + size;

	if (new_offset > iter->size || new_offset < iter->offset) {
		drm_err(&ptdev->base, "Firmware too small\n");
		return -EINVAL;
	}

	memcpy(out, iter->data + iter->offset, size);
	iter->offset = new_offset;
	return 0;
}

static int panthor_fw_binary_sub_iter_init(struct panthor_device *ptdev,
					   struct panthor_fw_binary_iter *iter,
					   struct panthor_fw_binary_iter *sub_iter,
					   size_t size)
{
	size_t new_offset = iter->offset + size;

	if (new_offset > iter->size || new_offset < iter->offset) {
		drm_err(&ptdev->base, "Firmware entry too long\n");
		return -EINVAL;
	}

	sub_iter->offset = 0;
	sub_iter->data = iter->data + iter->offset;
	sub_iter->size = size;
	iter->offset = new_offset;
	return 0;
}

static void panthor_fw_init_section_mem(struct panthor_device *ptdev,
					struct panthor_fw_section *section)
{
	bool was_mapped = !!section->mem->kmap;
	int ret;

	if (!section->data.size &&
	    !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_ZERO))
		return;

	ret = panthor_kernel_bo_vmap(section->mem);
	if (drm_WARN_ON(&ptdev->base, ret))
		return;

	memcpy(section->mem->kmap, section->data.buf, section->data.size);
	if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_ZERO) {
		memset(section->mem->kmap + section->data.size, 0,
		       panthor_kernel_bo_size(section->mem) - section->data.size);
	}

	if (!was_mapped)
		panthor_kernel_bo_vunmap(section->mem);
}

/**
 * panthor_fw_alloc_queue_iface_mem() - Allocate ring-buffer interfaces.
 * @ptdev: Device.
 * @input: Pointer holding the input interface on success.
 * Should be ignored on failure.
 * @output: Pointer holding the output interface on success.
 * Should be ignored on failure.
 * @input_fw_va: Pointer holding the input interface FW VA on success.
 * Should be ignored on failure.
 * @output_fw_va: Pointer holding the output interface FW VA on success.
 * Should be ignored on failure.
 *
 * Allocates panthor_fw_ringbuf_{input,out}_iface interfaces. The input
 * interface is at offset 0, and the output interface at offset 4096.
 *
 * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
 */
struct panthor_kernel_bo *
panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
				 struct panthor_fw_ringbuf_input_iface **input,
				 const struct panthor_fw_ringbuf_output_iface **output,
				 u32 *input_fw_va, u32 *output_fw_va)
{
	struct panthor_kernel_bo *mem;
	int ret;

	mem = panthor_kernel_bo_create(ptdev, ptdev->fw->vm, SZ_8K,
				       DRM_PANTHOR_BO_NO_MMAP,
				       DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
				       DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
				       PANTHOR_VM_KERNEL_AUTO_VA);
	if (IS_ERR(mem))
		return mem;

	ret = panthor_kernel_bo_vmap(mem);
	if (ret) {
		panthor_kernel_bo_destroy(mem);
		return ERR_PTR(ret);
	}

	memset(mem->kmap, 0, panthor_kernel_bo_size(mem));
	*input = mem->kmap;
	*output = mem->kmap + SZ_4K;
	*input_fw_va = panthor_kernel_bo_gpuva(mem);
	*output_fw_va = *input_fw_va + SZ_4K;

	return mem;
}

/**
 * panthor_fw_alloc_suspend_buf_mem() - Allocate a suspend buffer for a command stream group.
 * @ptdev: Device.
 * @size: Size of the suspend buffer.
 *
 * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
 */
struct panthor_kernel_bo *
panthor_fw_alloc_suspend_buf_mem(struct panthor_device *ptdev, size_t size)
{
	return panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev), size,
					DRM_PANTHOR_BO_NO_MMAP,
					DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
					PANTHOR_VM_KERNEL_AUTO_VA);
}

static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
					 const struct firmware *fw,
					 struct panthor_fw_binary_iter *iter,
					 u32 ehdr)
{
	ssize_t vm_pgsz = panthor_vm_page_size(ptdev->fw->vm);
	struct panthor_fw_binary_section_entry_hdr hdr;
	struct panthor_fw_section *section;
	u32 section_size;
	u32 name_len;
	int ret;

	ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr));
	if (ret)
		return ret;

	if (hdr.data.end < hdr.data.start) {
		drm_err(&ptdev->base, "Firmware corrupted, data.end < data.start (0x%x < 0x%x)\n",
			hdr.data.end, hdr.data.start);
		return -EINVAL;
	}

	if (hdr.va.end < hdr.va.start) {
		drm_err(&ptdev->base, "Firmware corrupted, hdr.va.end < hdr.va.start (0x%x < 0x%x)\n",
			hdr.va.end, hdr.va.start);
		return -EINVAL;
	}

	if (hdr.data.end > fw->size) {
		drm_err(&ptdev->base, "Firmware corrupted, file truncated? data_end=0x%x > fw size=0x%zx\n",
			hdr.data.end, fw->size);
		return -EINVAL;
	}

	if (!IS_ALIGNED(hdr.va.start, vm_pgsz) || !IS_ALIGNED(hdr.va.end, vm_pgsz)) {
		drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n",
			hdr.va.start, hdr.va.end);
		return -EINVAL;
	}

	if (hdr.flags & ~CSF_FW_BINARY_IFACE_ENTRY_SUPPORTED_FLAGS) {
		drm_err(&ptdev->base, "Firmware contains interface with unsupported flags (0x%x)\n",
			hdr.flags);
		return -EINVAL;
	}

	if (hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_PROT) {
		drm_warn(&ptdev->base,
			 "Firmware protected mode entry not supported, ignoring");
		return 0;
	}

	if (hdr.va.start == CSF_MCU_SHARED_REGION_START &&
	    !(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_SHARED)) {
		drm_err(&ptdev->base,
			"Interface at 0x%llx must be shared", CSF_MCU_SHARED_REGION_START);
		return -EINVAL;
	}

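	/*
	 * Whatever remains in the entry payload after the section header is
	 * the optional section name (not necessarily NULL-terminated, hence
	 * the explicit termination when the name is copied below).
	 */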
	name_len = iter->size - iter->offset;

	section = drmm_kzalloc(&ptdev->base, sizeof(*section), GFP_KERNEL);
	if (!section)
		return -ENOMEM;

	list_add_tail(&section->node, &ptdev->fw->sections);
	section->flags = hdr.flags;
	section->data.size = hdr.data.end - hdr.data.start;

	if (section->data.size > 0) {
		void *data = drmm_kmalloc(&ptdev->base, section->data.size, GFP_KERNEL);

		if (!data)
			return -ENOMEM;

		memcpy(data, fw->data + hdr.data.start, section->data.size);
		section->data.buf = data;
	}

	if (name_len > 0) {
		char *name = drmm_kmalloc(&ptdev->base, name_len + 1, GFP_KERNEL);

		if (!name)
			return -ENOMEM;

		memcpy(name, iter->data + iter->offset, name_len);
		name[name_len] = '\0';
		section->name = name;
	}

	section_size = hdr.va.end - hdr.va.start;
	if (section_size) {
		u32 cache_mode = hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK;
		struct panthor_gem_object *bo;
		u32 vm_map_flags = 0;
		struct sg_table *sgt;
		u64 va = hdr.va.start;

		if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_WR))
			vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_READONLY;

		if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_EX))
			vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC;

		/* TODO: CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_*_COHERENT are mapped to
		 * non-cacheable for now. We might want to introduce a new
		 * IOMMU_xxx flag (or abuse IOMMU_MMIO, which maps to device
		 * memory and is currently not used by our driver) for
		 * AS_MEMATTR_AARCH64_SHARED memory, so we can take benefit
		 * of IO-coherent systems.
		 */
		if (cache_mode != CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED)
			vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED;

		section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
							section_size,
							DRM_PANTHOR_BO_NO_MMAP,
							vm_map_flags, va);
		if (IS_ERR(section->mem))
			return PTR_ERR(section->mem);

		if (drm_WARN_ON(&ptdev->base, section->mem->va_node.start != hdr.va.start))
			return -EINVAL;

		if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_SHARED) {
			ret = panthor_kernel_bo_vmap(section->mem);
			if (ret)
				return ret;
		}

		panthor_fw_init_section_mem(ptdev, section);

		bo = to_panthor_bo(section->mem->obj);
		sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
		if (IS_ERR(sgt))
			return PTR_ERR(sgt);

		dma_sync_sgtable_for_device(ptdev->base.dev, sgt, DMA_TO_DEVICE);
	}

	if (hdr.va.start == CSF_MCU_SHARED_REGION_START)
		ptdev->fw->shared_section = section;

	return 0;
}

static int panthor_fw_read_build_info(struct panthor_device *ptdev,
				      const struct firmware *fw,
				      struct panthor_fw_binary_iter *iter,
				      u32 ehdr)
{
	struct panthor_fw_build_info_hdr hdr;
	char header[9];
	const char git_sha_header[sizeof(header)] = "git_sha: ";
	int ret;

	ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr));
	if (ret)
		return ret;

	if (hdr.meta_start > fw->size ||
	    hdr.meta_start + hdr.meta_size > fw->size) {
		drm_err(&ptdev->base, "Firmware build info corrupt\n");
		/* We don't need the build info, so continue */
		return 0;
	}

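	/*
	 * git_sha_header is sized to exclude the string's NUL terminator, so
	 * the memcmp() below only matches the "git_sha: " prefix of the
	 * metadata blob.
	 */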
	if (memcmp(git_sha_header, fw->data + hdr.meta_start,
		   sizeof(git_sha_header))) {
		/* Not the expected header, this isn't metadata we understand */
		return 0;
	}

	/* Check that the git SHA is NULL terminated as expected */
	if (fw->data[hdr.meta_start + hdr.meta_size - 1] != '\0') {
		drm_warn(&ptdev->base, "Firmware's git sha is not NULL terminated\n");
		/* Don't treat as fatal */
		return 0;
	}

	drm_info(&ptdev->base, "Firmware git sha: %s\n",
		 fw->data + hdr.meta_start + sizeof(git_sha_header));

	return 0;
}

static void
panthor_reload_fw_sections(struct panthor_device *ptdev, bool full_reload)
{
	struct panthor_fw_section *section;

	list_for_each_entry(section, &ptdev->fw->sections, node) {
		struct sg_table *sgt;

		if (!full_reload && !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_WR))
			continue;

		panthor_fw_init_section_mem(ptdev, section);
		sgt = drm_gem_shmem_get_pages_sgt(&to_panthor_bo(section->mem->obj)->base);
		if (!drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(sgt)))
			dma_sync_sgtable_for_device(ptdev->base.dev, sgt, DMA_TO_DEVICE);
	}
}

static int panthor_fw_load_entry(struct panthor_device *ptdev,
				 const struct firmware *fw,
				 struct panthor_fw_binary_iter *iter)
{
	struct panthor_fw_binary_iter eiter;
	u32 ehdr;
	int ret;

	ret = panthor_fw_binary_iter_read(ptdev, iter, &ehdr, sizeof(ehdr));
	if (ret)
		return ret;

	if ((iter->offset % sizeof(u32)) ||
	    (CSF_FW_BINARY_ENTRY_SIZE(ehdr) % sizeof(u32))) {
		drm_err(&ptdev->base, "Firmware entry isn't 32 bit aligned, offset=0x%x size=0x%x\n",
			(u32)(iter->offset - sizeof(u32)), CSF_FW_BINARY_ENTRY_SIZE(ehdr));
		return -EINVAL;
	}

	if (panthor_fw_binary_sub_iter_init(ptdev, iter, &eiter,
					    CSF_FW_BINARY_ENTRY_SIZE(ehdr) - sizeof(ehdr)))
		return -EINVAL;

	switch (CSF_FW_BINARY_ENTRY_TYPE(ehdr)) {
	case CSF_FW_BINARY_ENTRY_TYPE_IFACE:
		return panthor_fw_load_section_entry(ptdev, fw, &eiter, ehdr);
	case CSF_FW_BINARY_ENTRY_TYPE_BUILD_INFO_METADATA:
		return panthor_fw_read_build_info(ptdev, fw, &eiter, ehdr);

	/* FIXME: handle those entry types? */
	case CSF_FW_BINARY_ENTRY_TYPE_CONFIG:
	case CSF_FW_BINARY_ENTRY_TYPE_FUTF_TEST:
	case CSF_FW_BINARY_ENTRY_TYPE_TRACE_BUFFER:
	case CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA:
		return 0;
	default:
		break;
	}

	if (ehdr & CSF_FW_BINARY_ENTRY_OPTIONAL)
		return 0;

	drm_err(&ptdev->base,
		"Unsupported non-optional entry type %u in firmware\n",
		CSF_FW_BINARY_ENTRY_TYPE(ehdr));
	return -EINVAL;
}

static int panthor_fw_load(struct panthor_device *ptdev)
{
	const struct firmware *fw = NULL;
	struct panthor_fw_binary_iter iter = {};
	struct panthor_fw_binary_hdr hdr;
	char fw_path[128];
	int ret;

	snprintf(fw_path, sizeof(fw_path), "arm/mali/arch%d.%d/%s",
		 (u32)GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id),
		 (u32)GPU_ARCH_MINOR(ptdev->gpu_info.gpu_id),
		 CSF_FW_NAME);

	ret = request_firmware(&fw, fw_path, ptdev->base.dev);
	if (ret) {
		drm_err(&ptdev->base, "Failed to load firmware image '%s'\n",
			CSF_FW_NAME);
		return ret;
	}

	iter.data = fw->data;
	iter.size = fw->size;
	ret = panthor_fw_binary_iter_read(ptdev, &iter, &hdr, sizeof(hdr));
	if (ret)
		goto out;

	if (hdr.magic != CSF_FW_BINARY_HEADER_MAGIC) {
		ret = -EINVAL;
		drm_err(&ptdev->base, "Invalid firmware magic\n");
		goto out;
	}

	if (hdr.major != CSF_FW_BINARY_HEADER_MAJOR_MAX) {
		ret = -EINVAL;
		drm_err(&ptdev->base, "Unsupported firmware binary header version %d.%d (expected %d.x)\n",
			hdr.major, hdr.minor, CSF_FW_BINARY_HEADER_MAJOR_MAX);
		goto out;
	}

	if (hdr.size > iter.size) {
		ret = -EINVAL;
		drm_err(&ptdev->base, "Firmware image is truncated\n");
		goto out;
	}

	iter.size = hdr.size;

	while (iter.offset < hdr.size) {
		ret = panthor_fw_load_entry(ptdev, fw, &iter);
		if (ret)
			goto out;
	}

	if (!ptdev->fw->shared_section) {
		drm_err(&ptdev->base, "Shared interface region not found\n");
		ret = -EINVAL;
		goto out;
	}

out:
	release_firmware(fw);
	return ret;
}

/**
 * iface_fw_to_cpu_addr() - Turn an MCU address into a CPU address
 * @ptdev: Device.
 * @mcu_va: MCU address.
 *
 * Return: NULL if the address is not part of the shared section, non-NULL otherwise.
 */
static void *iface_fw_to_cpu_addr(struct panthor_device *ptdev, u32 mcu_va)
{
	u64 shared_mem_start = panthor_kernel_bo_gpuva(ptdev->fw->shared_section->mem);
	u64 shared_mem_end = shared_mem_start +
			     panthor_kernel_bo_size(ptdev->fw->shared_section->mem);
	if (mcu_va < shared_mem_start || mcu_va >= shared_mem_end)
		return NULL;

	return ptdev->fw->shared_section->mem->kmap + (mcu_va - shared_mem_start);
}

static int panthor_init_cs_iface(struct panthor_device *ptdev,
				 unsigned int csg_idx, unsigned int cs_idx)
{
	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, csg_idx);
	struct panthor_fw_cs_iface *cs_iface = &ptdev->fw->iface.streams[csg_idx][cs_idx];
	u64 shared_section_sz = panthor_kernel_bo_size(ptdev->fw->shared_section->mem);
	u32 iface_offset = CSF_GROUP_CONTROL_OFFSET +
			   (csg_idx * glb_iface->control->group_stride) +
			   CSF_STREAM_CONTROL_OFFSET +
			   (cs_idx * csg_iface->control->stream_stride);
	struct panthor_fw_cs_iface *first_cs_iface =
		panthor_fw_get_cs_iface(ptdev, 0, 0);

	if (iface_offset + sizeof(*cs_iface) >= shared_section_sz)
		return -EINVAL;

	spin_lock_init(&cs_iface->lock);
	cs_iface->control = ptdev->fw->shared_section->mem->kmap + iface_offset;
	cs_iface->input = iface_fw_to_cpu_addr(ptdev, cs_iface->control->input_va);
	cs_iface->output = iface_fw_to_cpu_addr(ptdev, cs_iface->control->output_va);

	if (!cs_iface->input || !cs_iface->output) {
		drm_err(&ptdev->base, "Invalid stream control interface input/output VA");
		return -EINVAL;
	}

	if (cs_iface != first_cs_iface) {
		if (cs_iface->control->features != first_cs_iface->control->features) {
			drm_err(&ptdev->base, "Expecting identical CS slots");
			return -EINVAL;
		}
	} else {
		u32 reg_count = CS_FEATURES_WORK_REGS(cs_iface->control->features);

		ptdev->csif_info.cs_reg_count = reg_count;
		ptdev->csif_info.unpreserved_cs_reg_count = CSF_UNPRESERVED_REG_COUNT;
	}

	return 0;
}

static bool compare_csg(const struct panthor_fw_csg_control_iface *a,
			const struct panthor_fw_csg_control_iface *b)
{
	if (a->features != b->features)
		return false;
	if (a->suspend_size != b->suspend_size)
		return false;
	if (a->protm_suspend_size != b->protm_suspend_size)
		return false;
	if (a->stream_num != b->stream_num)
		return false;
	return true;
}

static int panthor_init_csg_iface(struct panthor_device *ptdev,
				  unsigned int csg_idx)
{
	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
	struct panthor_fw_csg_iface *csg_iface = &ptdev->fw->iface.groups[csg_idx];
	u64 shared_section_sz = panthor_kernel_bo_size(ptdev->fw->shared_section->mem);
	u32 iface_offset = CSF_GROUP_CONTROL_OFFSET + (csg_idx * glb_iface->control->group_stride);
	unsigned int i;

	if (iface_offset + sizeof(*csg_iface) >= shared_section_sz)
		return -EINVAL;

	spin_lock_init(&csg_iface->lock);
	csg_iface->control = ptdev->fw->shared_section->mem->kmap + iface_offset;
	csg_iface->input = iface_fw_to_cpu_addr(ptdev, csg_iface->control->input_va);
	csg_iface->output = iface_fw_to_cpu_addr(ptdev, csg_iface->control->output_va);

	if (csg_iface->control->stream_num < MIN_CS_PER_CSG ||
	    csg_iface->control->stream_num > MAX_CS_PER_CSG)
		return -EINVAL;

	if (!csg_iface->input || !csg_iface->output) {
		drm_err(&ptdev->base, "Invalid group control interface input/output VA");
		return -EINVAL;
	}

	if (csg_idx > 0) {
		struct panthor_fw_csg_iface *first_csg_iface =
			panthor_fw_get_csg_iface(ptdev, 0);

		if (!compare_csg(first_csg_iface->control, csg_iface->control)) {
			drm_err(&ptdev->base, "Expecting identical CSG slots");
			return -EINVAL;
		}
	}

	for (i = 0; i < csg_iface->control->stream_num; i++) {
		int ret = panthor_init_cs_iface(ptdev, csg_idx, i);

		if (ret)
			return ret;
	}

	return 0;
}

static u32 panthor_get_instr_features(struct panthor_device *ptdev)
{
	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);

	if (glb_iface->control->version < CSF_IFACE_VERSION(1, 1, 0))
		return 0;

	return glb_iface->control->instr_features;
}

static int panthor_fw_init_ifaces(struct panthor_device *ptdev)
{
	struct panthor_fw_global_iface *glb_iface = &ptdev->fw->iface.global;
	unsigned int i;

	if (!ptdev->fw->shared_section->mem->kmap)
		return -EINVAL;

	spin_lock_init(&glb_iface->lock);
	glb_iface->control = ptdev->fw->shared_section->mem->kmap;

	if (!glb_iface->control->version) {
		drm_err(&ptdev->base, "Firmware version is 0. Firmware may have failed to boot");
		return -EINVAL;
	}

	glb_iface->input = iface_fw_to_cpu_addr(ptdev, glb_iface->control->input_va);
	glb_iface->output = iface_fw_to_cpu_addr(ptdev, glb_iface->control->output_va);
	if (!glb_iface->input || !glb_iface->output) {
		drm_err(&ptdev->base, "Invalid global control interface input/output VA");
		return -EINVAL;
	}

	if (glb_iface->control->group_num > MAX_CSGS ||
	    glb_iface->control->group_num < MIN_CSGS) {
		drm_err(&ptdev->base, "Invalid number of control groups");
		return -EINVAL;
	}

	for (i = 0; i < glb_iface->control->group_num; i++) {
		int ret = panthor_init_csg_iface(ptdev, i);

		if (ret)
			return ret;
	}

	drm_info(&ptdev->base, "CSF FW using interface v%d.%d.%d, Features %#x Instrumentation features %#x",
		 CSF_IFACE_VERSION_MAJOR(glb_iface->control->version),
		 CSF_IFACE_VERSION_MINOR(glb_iface->control->version),
		 CSF_IFACE_VERSION_PATCH(glb_iface->control->version),
		 glb_iface->control->features,
		 panthor_get_instr_features(ptdev));
	return 0;
}

static void panthor_fw_init_global_iface(struct panthor_device *ptdev)
{
	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);

	/* Enable all cores. */
	glb_iface->input->core_en_mask = ptdev->gpu_info.shader_present;

	/* Setup timers. */
	glb_iface->input->poweroff_timer = panthor_fw_conv_timeout(ptdev, PWROFF_HYSTERESIS_US);
	glb_iface->input->progress_timer = PROGRESS_TIMEOUT_CYCLES >> PROGRESS_TIMEOUT_SCALE_SHIFT;
	glb_iface->input->idle_timer = panthor_fw_conv_timeout(ptdev, IDLE_HYSTERESIS_US);

	/* Enable interrupts we care about. */
	glb_iface->input->ack_irq_mask = GLB_CFG_ALLOC_EN |
					 GLB_PING |
					 GLB_CFG_PROGRESS_TIMER |
					 GLB_CFG_POWEROFF_TIMER |
					 GLB_IDLE_EN |
					 GLB_IDLE;

	panthor_fw_update_reqs(glb_iface, req, GLB_IDLE_EN, GLB_IDLE_EN);
	panthor_fw_toggle_reqs(glb_iface, req, ack,
			       GLB_CFG_ALLOC_EN |
			       GLB_CFG_POWEROFF_TIMER |
			       GLB_CFG_PROGRESS_TIMER);

	gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);

	/* Kick the watchdog. */
	mod_delayed_work(ptdev->reset.wq, &ptdev->fw->watchdog.ping_work,
			 msecs_to_jiffies(PING_INTERVAL_MS));
}

static void panthor_job_irq_handler(struct panthor_device *ptdev, u32 status)
{
	if (!ptdev->fw->booted && (status & JOB_INT_GLOBAL_IF))
		ptdev->fw->booted = true;

	wake_up_all(&ptdev->fw->req_waitqueue);

	/* If the FW is not booted, don't process IRQs, just flag the FW as booted. */
	if (!ptdev->fw->booted)
		return;

	panthor_sched_report_fw_events(ptdev, status);
}
PANTHOR_IRQ_HANDLER(job, JOB, panthor_job_irq_handler);
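
/*
 * PANTHOR_IRQ_HANDLER() generates the panthor_request_job_irq(),
 * panthor_job_irq_suspend() and panthor_job_irq_resume() helpers used in the
 * rest of this file to manage the "job" interrupt line.
 */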

static int panthor_fw_start(struct panthor_device *ptdev)
{
	bool timedout = false;

	ptdev->fw->booted = false;
	panthor_job_irq_resume(&ptdev->fw->irq, ~0);
	gpu_write(ptdev, MCU_CONTROL, MCU_CONTROL_AUTO);

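	/*
	 * The FW raises the GLOBAL_IF bit of the job interrupt once it has
	 * booted. If the sleeping wait times out, re-check the raw interrupt
	 * status in case the IRQ is pending but was not processed in time.
	 */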
	if (!wait_event_timeout(ptdev->fw->req_waitqueue,
				ptdev->fw->booted,
				msecs_to_jiffies(1000))) {
		if (!ptdev->fw->booted &&
		    !(gpu_read(ptdev, JOB_INT_STAT) & JOB_INT_GLOBAL_IF))
			timedout = true;
	}

	if (timedout) {
		static const char * const status_str[] = {
			[MCU_STATUS_DISABLED] = "disabled",
			[MCU_STATUS_ENABLED] = "enabled",
			[MCU_STATUS_HALT] = "halt",
			[MCU_STATUS_FATAL] = "fatal",
		};
		u32 status = gpu_read(ptdev, MCU_STATUS);

		drm_err(&ptdev->base, "Failed to boot MCU (status=%s)",
			status < ARRAY_SIZE(status_str) ? status_str[status] : "unknown");
		return -ETIMEDOUT;
	}

	return 0;
}

static void panthor_fw_stop(struct panthor_device *ptdev)
{
	u32 status;

	gpu_write(ptdev, MCU_CONTROL, MCU_CONTROL_DISABLE);
	if (readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
			       status == MCU_STATUS_DISABLED, 10, 100000))
		drm_err(&ptdev->base, "Failed to stop MCU");
}

/**
 * panthor_fw_pre_reset() - Call before a reset.
 * @ptdev: Device.
 * @on_hang: true if the reset was triggered on a GPU hang.
 *
 * If the reset is not triggered on a hang, we try to gracefully halt the
 * MCU, so we can do a fast-reset when panthor_fw_post_reset() is called.
 */
void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
{
	/* Make sure we won't be woken up by a ping. */
	cancel_delayed_work_sync(&ptdev->fw->watchdog.ping_work);

	ptdev->reset.fast = false;

	if (!on_hang) {
		struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
		u32 status;

		panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
		gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
		if (!readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
					status == MCU_STATUS_HALT, 10, 100000)) {
			ptdev->reset.fast = true;
		} else {
			drm_warn(&ptdev->base, "Failed to cleanly suspend MCU");
		}
	}

	panthor_job_irq_suspend(&ptdev->fw->irq);
}

/**
 * panthor_fw_post_reset() - Call after a reset.
 * @ptdev: Device.
 *
 * Start the FW. If this is not a fast reset, all FW sections are reloaded to
 * make sure we can recover from a memory corruption.
 */
int panthor_fw_post_reset(struct panthor_device *ptdev)
{
	int ret;

	/* Make the MCU VM active. */
	ret = panthor_vm_active(ptdev->fw->vm);
	if (ret)
		return ret;

	if (!ptdev->reset.fast) {
		/* On a slow reset, reload all sections, including RO ones.
		 * We're not supposed to end up here anyway, let's just assume
		 * the overhead of reloading everything is acceptable.
		 */
		panthor_reload_fw_sections(ptdev, true);
	} else {
		/* The FW detects 0 -> 1 transitions. Make sure we reset
		 * the HALT bit before the FW is rebooted.
		 * This is not needed on a slow reset because FW sections are
		 * re-initialized.
		 */
		struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);

		panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT);
	}

	ret = panthor_fw_start(ptdev);
	if (ret) {
		drm_err(&ptdev->base, "FW %s reset failed",
			ptdev->reset.fast ? "fast" : "slow");
		return ret;
	}

	/* We must re-initialize the global interface even on fast-reset. */
	panthor_fw_init_global_iface(ptdev);
	return 0;
}

/**
 * panthor_fw_unplug() - Called when the device is unplugged.
 * @ptdev: Device.
 *
 * This function must make sure all pending operations are flushed before it
 * releases device resources, thus preventing any interaction with the HW.
 *
 * If FW-related work is still running after this function returns, it must
 * use drm_dev_{enter,exit}() and skip any HW access when drm_dev_enter()
 * returns false.
 */
void panthor_fw_unplug(struct panthor_device *ptdev)
{
	struct panthor_fw_section *section;

	cancel_delayed_work_sync(&ptdev->fw->watchdog.ping_work);

	if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev)) {
		/* Make sure the IRQ handler cannot be called after that point. */
		if (ptdev->fw->irq.irq)
			panthor_job_irq_suspend(&ptdev->fw->irq);

		panthor_fw_stop(ptdev);
	}

	list_for_each_entry(section, &ptdev->fw->sections, node)
		panthor_kernel_bo_destroy(section->mem);

	/* We intentionally don't call panthor_vm_idle() and let
	 * panthor_mmu_unplug() release the AS we acquired with
	 * panthor_vm_active() so we don't have to track the VM active/idle
	 * state to keep the active_refcnt balanced.
	 */
	panthor_vm_put(ptdev->fw->vm);
	ptdev->fw->vm = NULL;

	if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
		panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
}

/**
 * panthor_fw_wait_acks() - Wait for requests to be acknowledged by the FW.
 * @req_ptr: Pointer to the req register.
 * @ack_ptr: Pointer to the ack register.
 * @wq: Wait queue to use for the sleeping wait.
 * @req_mask: Mask of requests to wait for.
 * @acked: Pointer to field that's updated with the acked requests.
 * If the function returns 0, *acked == req_mask.
 * @timeout_ms: Timeout expressed in milliseconds.
 *
 * Return: 0 on success, -ETIMEDOUT otherwise.
 */
static int panthor_fw_wait_acks(const u32 *req_ptr, const u32 *ack_ptr,
				wait_queue_head_t *wq,
				u32 req_mask, u32 *acked,
				u32 timeout_ms)
{
	u32 ack, req = READ_ONCE(*req_ptr) & req_mask;
	int ret;

	/* Busy wait for a few µsecs before falling back to a sleeping wait. */
	*acked = req_mask;
	ret = read_poll_timeout_atomic(READ_ONCE, ack,
				       (ack & req_mask) == req,
				       0, 10, 0,
				       *ack_ptr);
	if (!ret)
		return 0;

	if (wait_event_timeout(*wq, (READ_ONCE(*ack_ptr) & req_mask) == req,
			       msecs_to_jiffies(timeout_ms)))
		return 0;

	/* Check one last time, in case we were not woken up for some reason. */
	ack = READ_ONCE(*ack_ptr);
	if ((ack & req_mask) == req)
		return 0;

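	/*
	 * Report which of the requested bits actually got acknowledged: a bit
	 * is considered acked when req and ack agree on its value.
	 */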
	*acked = ~(req ^ ack) & req_mask;
	return -ETIMEDOUT;
}

/**
 * panthor_fw_glb_wait_acks() - Wait for global requests to be acknowledged.
 * @ptdev: Device.
 * @req_mask: Mask of requests to wait for.
 * @acked: Pointer to field that's updated with the acked requests.
 * If the function returns 0, *acked == req_mask.
 * @timeout_ms: Timeout expressed in milliseconds.
 *
 * Return: 0 on success, -ETIMEDOUT otherwise.
 */
int panthor_fw_glb_wait_acks(struct panthor_device *ptdev,
			     u32 req_mask, u32 *acked,
			     u32 timeout_ms)
{
	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);

	/* GLB_HALT doesn't get acked through the FW interface. */
	if (drm_WARN_ON(&ptdev->base, req_mask & (~GLB_REQ_MASK | GLB_HALT)))
		return -EINVAL;

	return panthor_fw_wait_acks(&glb_iface->input->req,
				    &glb_iface->output->ack,
				    &ptdev->fw->req_waitqueue,
				    req_mask, acked, timeout_ms);
}

/**
 * panthor_fw_csg_wait_acks() - Wait for command stream group requests to be acknowledged.
 * @ptdev: Device.
 * @csg_slot: CSG slot ID.
 * @req_mask: Mask of requests to wait for.
 * @acked: Pointer to field that's updated with the acked requests.
 * If the function returns 0, *acked == req_mask.
 * @timeout_ms: Timeout expressed in milliseconds.
 *
 * Return: 0 on success, -ETIMEDOUT otherwise.
 */
int panthor_fw_csg_wait_acks(struct panthor_device *ptdev, u32 csg_slot,
			     u32 req_mask, u32 *acked, u32 timeout_ms)
{
	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, csg_slot);
	int ret;

	if (drm_WARN_ON(&ptdev->base, req_mask & ~CSG_REQ_MASK))
		return -EINVAL;

	ret = panthor_fw_wait_acks(&csg_iface->input->req,
				   &csg_iface->output->ack,
				   &ptdev->fw->req_waitqueue,
				   req_mask, acked, timeout_ms);

	/*
	 * Check that all bits in the state field were updated, if any mismatch
	 * then clear all bits in the state field. This allows code to do
	 * (acked & CSG_STATE_MASK) and get the right value.
	 */

	if ((*acked & CSG_STATE_MASK) != CSG_STATE_MASK)
		*acked &= ~CSG_STATE_MASK;

	return ret;
}

/**
 * panthor_fw_ring_csg_doorbells() - Ring command stream group doorbells.
 * @ptdev: Device.
 * @csg_mask: Bitmask encoding the command stream group doorbells to ring.
 *
 * This function is toggling bits in the doorbell_req and ringing the
 * global doorbell. It doesn't require a user doorbell to be attached to
 * the group.
 */
void panthor_fw_ring_csg_doorbells(struct panthor_device *ptdev, u32 csg_mask)
{
	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);

	panthor_fw_toggle_reqs(glb_iface, doorbell_req, doorbell_ack, csg_mask);
	gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
}

static void panthor_fw_ping_work(struct work_struct *work)
{
	struct panthor_fw *fw = container_of(work, struct panthor_fw, watchdog.ping_work.work);
	struct panthor_device *ptdev = fw->irq.ptdev;
	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
	u32 acked;
	int ret;

	if (panthor_device_reset_is_pending(ptdev))
		return;

	panthor_fw_toggle_reqs(glb_iface, req, ack, GLB_PING);
	gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);

	ret = panthor_fw_glb_wait_acks(ptdev, GLB_PING, &acked, 100);
	if (ret) {
		panthor_device_schedule_reset(ptdev);
		drm_err(&ptdev->base, "FW ping timeout, scheduling a reset");
	} else {
		mod_delayed_work(ptdev->reset.wq, &fw->watchdog.ping_work,
				 msecs_to_jiffies(PING_INTERVAL_MS));
	}
}

/**
 * panthor_fw_init() - Initialize FW related data.
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_fw_init(struct panthor_device *ptdev)
{
	struct panthor_fw *fw;
	int ret, irq;

	fw = drmm_kzalloc(&ptdev->base, sizeof(*fw), GFP_KERNEL);
	if (!fw)
		return -ENOMEM;

	ptdev->fw = fw;
	init_waitqueue_head(&fw->req_waitqueue);
	INIT_LIST_HEAD(&fw->sections);
	INIT_DELAYED_WORK(&fw->watchdog.ping_work, panthor_fw_ping_work);

	irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "job");
	if (irq <= 0)
		return -ENODEV;

	ret = panthor_request_job_irq(ptdev, &fw->irq, irq, 0);
	if (ret) {
		drm_err(&ptdev->base, "failed to request job irq");
		return ret;
	}

	ret = panthor_gpu_l2_power_on(ptdev);
	if (ret)
		return ret;

	fw->vm = panthor_vm_create(ptdev, true,
				   0, SZ_4G,
				   CSF_MCU_SHARED_REGION_START,
				   CSF_MCU_SHARED_REGION_SIZE);
	if (IS_ERR(fw->vm)) {
		ret = PTR_ERR(fw->vm);
		fw->vm = NULL;
		goto err_unplug_fw;
	}

	ret = panthor_fw_load(ptdev);
	if (ret)
		goto err_unplug_fw;

	ret = panthor_vm_active(fw->vm);
	if (ret)
		goto err_unplug_fw;

	ret = panthor_fw_start(ptdev);
	if (ret)
		goto err_unplug_fw;

	ret = panthor_fw_init_ifaces(ptdev);
	if (ret)
		goto err_unplug_fw;

	panthor_fw_init_global_iface(ptdev);
	return 0;

err_unplug_fw:
	panthor_fw_unplug(ptdev);
	return ret;
}

MODULE_FIRMWARE("arm/mali/arch10.8/mali_csffw.bin");