xref: /linux/drivers/gpu/drm/imagination/pvr_ccb.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /* Copyright (c) 2023 Imagination Technologies Ltd. */
3 
4 #include "pvr_ccb.h"
5 #include "pvr_device.h"
6 #include "pvr_drv.h"
7 #include "pvr_free_list.h"
8 #include "pvr_fw.h"
9 #include "pvr_gem.h"
10 #include "pvr_power.h"
11 
12 #include <drm/drm_managed.h>
13 #include <drm/drm_print.h>
14 #include <linux/compiler.h>
15 #include <linux/delay.h>
16 #include <linux/jiffies.h>
17 #include <linux/kernel.h>
18 #include <linux/mutex.h>
19 #include <linux/types.h>
20 #include <linux/workqueue.h>
21 
22 #define RESERVE_SLOT_TIMEOUT (1 * HZ) /* 1s */
23 #define RESERVE_SLOT_MIN_RETRIES 10
24 
25 static void
26 ccb_ctrl_init(void *cpu_ptr, void *priv)
27 {
28 	struct rogue_fwif_ccb_ctl *ctrl = cpu_ptr;
29 	struct pvr_ccb *pvr_ccb = priv;
30 
31 	ctrl->write_offset = 0;
32 	ctrl->read_offset = 0;
33 	ctrl->wrap_mask = pvr_ccb->num_cmds - 1;
34 	ctrl->cmd_size = pvr_ccb->cmd_size;
35 }
36 
/**
 * pvr_ccb_init() - Initialise a CCB
 * @pvr_dev: Device pointer.
 * @pvr_ccb: Pointer to CCB structure to initialise.
 * @num_cmds_log2: Log2 of number of commands in this CCB.
 * @cmd_size: Command size for this CCB.
 *
 * Creates and maps the firmware objects backing the CCB control structure and
 * the command buffer itself, and caches their FW addresses in @pvr_ccb.
 *
 * Return:
 *  * Zero on success, or
 *  * Any error code returned by pvr_fw_object_create_and_map().
 */
static int
pvr_ccb_init(struct pvr_device *pvr_dev, struct pvr_ccb *pvr_ccb,
	     u32 num_cmds_log2, size_t cmd_size)
{
	/* Power-of-two slot count so wrap_mask can be used as a plain mask. */
	u32 num_cmds = 1 << num_cmds_log2;
	u32 ccb_size = num_cmds * cmd_size;
	int err;

	pvr_ccb->num_cmds = num_cmds;
	pvr_ccb->cmd_size = cmd_size;

	/* DRM-managed mutex: released automatically with the device. */
	err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_ccb->lock);
	if (err)
		return err;

	/*
	 * Map CCB and control structure as uncached, so we don't have to flush
	 * CPU cache repeatedly when polling for space.
	 */
	pvr_ccb->ctrl = pvr_fw_object_create_and_map(pvr_dev, sizeof(*pvr_ccb->ctrl),
						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						     ccb_ctrl_init, pvr_ccb, &pvr_ccb->ctrl_obj);
	if (IS_ERR(pvr_ccb->ctrl))
		return PTR_ERR(pvr_ccb->ctrl);

	pvr_ccb->ccb = pvr_fw_object_create_and_map(pvr_dev, ccb_size,
						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						    NULL, NULL, &pvr_ccb->ccb_obj);
	if (IS_ERR(pvr_ccb->ccb)) {
		err = PTR_ERR(pvr_ccb->ccb);
		goto err_free_ctrl;
	}

	pvr_fw_object_get_fw_addr(pvr_ccb->ctrl_obj, &pvr_ccb->ctrl_fw_addr);
	pvr_fw_object_get_fw_addr(pvr_ccb->ccb_obj, &pvr_ccb->ccb_fw_addr);

	/*
	 * NOTE(review): the control structure was already initialised to these
	 * values by the ccb_ctrl_init() callback passed above, so these stores
	 * look redundant — confirm whether they can be dropped or whether the
	 * callback runs at a different point than creation.
	 */
	WRITE_ONCE(pvr_ccb->ctrl->write_offset, 0);
	WRITE_ONCE(pvr_ccb->ctrl->read_offset, 0);
	WRITE_ONCE(pvr_ccb->ctrl->wrap_mask, num_cmds - 1);
	WRITE_ONCE(pvr_ccb->ctrl->cmd_size, cmd_size);

	return 0;

err_free_ctrl:
	pvr_fw_object_unmap_and_destroy(pvr_ccb->ctrl_obj);

	return err;
}
96 
/**
 * pvr_ccb_fini() - Release CCB structure
 * @pvr_ccb: CCB to release.
 *
 * Destroys the two firmware objects created by pvr_ccb_init(). The CCB lock
 * is DRM-managed and is torn down with the device, not here.
 */
void
pvr_ccb_fini(struct pvr_ccb *pvr_ccb)
{
	pvr_fw_object_unmap_and_destroy(pvr_ccb->ccb_obj);
	pvr_fw_object_unmap_and_destroy(pvr_ccb->ctrl_obj);
}
107 
108 /**
109  * pvr_ccb_slot_available_locked() - Test whether any slots are available in CCB
110  * @pvr_ccb: CCB to test.
111  * @write_offset: Address to store number of next available slot. May be %NULL.
112  *
113  * Caller must hold @pvr_ccb->lock.
114  *
115  * Return:
116  *  * %true if a slot is available, or
117  *  * %false if no slot is available.
118  */
119 static __always_inline bool
120 pvr_ccb_slot_available_locked(struct pvr_ccb *pvr_ccb, u32 *write_offset)
121 {
122 	struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
123 	u32 next_write_offset = (READ_ONCE(ctrl->write_offset) + 1) & READ_ONCE(ctrl->wrap_mask);
124 
125 	lockdep_assert_held(&pvr_ccb->lock);
126 
127 	if (READ_ONCE(ctrl->read_offset) != next_write_offset) {
128 		if (write_offset)
129 			*write_offset = next_write_offset;
130 		return true;
131 	}
132 
133 	return false;
134 }
135 
/*
 * Dispatch a single firmware-to-host (FWCCB) command to its handler.
 * Called with the FWCCB lock dropped (see pvr_fwccb_process()), so handlers
 * are free to sleep or take other locks.
 */
static void
process_fwccb_command(struct pvr_device *pvr_dev, struct rogue_fwif_fwccb_cmd *cmd)
{
	switch (cmd->cmd_type) {
	case ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART:
		/* Firmware asked the host to reset the GPU. */
		pvr_power_reset(pvr_dev, false);
		break;

	case ROGUE_FWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION:
		pvr_free_list_process_reconstruct_req(pvr_dev,
						      &cmd->cmd_data.cmd_freelists_reconstruction);
		break;

	case ROGUE_FWIF_FWCCB_CMD_FREELIST_GROW:
		pvr_free_list_process_grow_req(pvr_dev, &cmd->cmd_data.cmd_free_list_gs);
		break;

	default:
		/* Unknown commands are logged and otherwise ignored. */
		drm_info(from_pvr_device(pvr_dev), "Received unknown FWCCB command %x\n",
			 cmd->cmd_type);
		break;
	}
}
159 
/**
 * pvr_fwccb_process() - Process any pending FWCCB commands
 * @pvr_dev: Target PowerVR device
 *
 * Drains the firmware-to-host CCB: each pending command is copied out, the
 * read offset is advanced, and the command is processed with the FWCCB lock
 * dropped so handlers may block.
 */
void pvr_fwccb_process(struct pvr_device *pvr_dev)
{
	struct rogue_fwif_fwccb_cmd *fwccb = pvr_dev->fwccb.ccb;
	struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->fwccb.ctrl;
	u32 read_offset;

	mutex_lock(&pvr_dev->fwccb.lock);

	/* read_offset == write_offset means the FWCCB is empty. */
	while ((read_offset = READ_ONCE(ctrl->read_offset)) != READ_ONCE(ctrl->write_offset)) {
		/*
		 * Copy the command by value so the slot can be released (read
		 * offset advanced) before the handler runs.
		 */
		struct rogue_fwif_fwccb_cmd cmd = fwccb[read_offset];

		WRITE_ONCE(ctrl->read_offset, (read_offset + 1) & READ_ONCE(ctrl->wrap_mask));

		/* Drop FWCCB lock while we process command. */
		mutex_unlock(&pvr_dev->fwccb.lock);

		process_fwccb_command(pvr_dev, &cmd);

		mutex_lock(&pvr_dev->fwccb.lock);
	}

	mutex_unlock(&pvr_dev->fwccb.lock);
}
187 
/**
 * pvr_kccb_capacity() - Returns the maximum number of usable KCCB slots.
 * @pvr_dev: Target PowerVR device
 *
 * Return:
 *  * The maximum number of active slots.
 */
static u32 pvr_kccb_capacity(struct pvr_device *pvr_dev)
{
	/* Capacity is the number of slot minus one to cope with the wrapping
	 * mechanisms. If we were to use all slots, we might end up with
	 * read_offset == write_offset, which the FW considers as a KCCB-is-empty
	 * condition.
	 */
	return pvr_dev->kccb.slot_count - 1;
}
204 
205 /**
206  * pvr_kccb_used_slot_count_locked() - Get the number of used slots
207  * @pvr_dev: Device pointer.
208  *
209  * KCCB lock must be held.
210  *
211  * Return:
212  *  * The number of slots currently used.
213  */
214 static u32
215 pvr_kccb_used_slot_count_locked(struct pvr_device *pvr_dev)
216 {
217 	struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb;
218 	struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
219 	u32 wr_offset = READ_ONCE(ctrl->write_offset);
220 	u32 rd_offset = READ_ONCE(ctrl->read_offset);
221 	u32 used_count;
222 
223 	lockdep_assert_held(&pvr_ccb->lock);
224 
225 	if (wr_offset >= rd_offset)
226 		used_count = wr_offset - rd_offset;
227 	else
228 		used_count = wr_offset + pvr_dev->kccb.slot_count - rd_offset;
229 
230 	return used_count;
231 }
232 
/**
 * pvr_kccb_send_cmd_reserved_powered() - Send command to the KCCB, with the PM ref
 * held and a slot pre-reserved
 * @pvr_dev: Device pointer.
 * @cmd: Command to sent.
 * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
 *
 * Copies @cmd into the next free slot, publishes the new write offset and
 * kicks the firmware MTS so it picks the command up. A slot must have been
 * reserved beforehand (pvr_kccb_reserve_slot() or pvr_kccb_try_reserve_slot()),
 * which is consumed here by decrementing reserved_count.
 */
void
pvr_kccb_send_cmd_reserved_powered(struct pvr_device *pvr_dev,
				   struct rogue_fwif_kccb_cmd *cmd,
				   u32 *kccb_slot)
{
	struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb;
	struct rogue_fwif_kccb_cmd *kccb = pvr_ccb->ccb;
	struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
	u32 old_write_offset;
	u32 new_write_offset;

	/* Sending commands to a lost device indicates a caller-side bug. */
	WARN_ON(pvr_dev->lost);

	mutex_lock(&pvr_ccb->lock);

	if (WARN_ON(!pvr_dev->kccb.reserved_count))
		goto out_unlock;

	old_write_offset = READ_ONCE(ctrl->write_offset);

	/* We reserved the slot, we should have one available. */
	if (WARN_ON(!pvr_ccb_slot_available_locked(pvr_ccb, &new_write_offset)))
		goto out_unlock;

	/* The command is written at the current write offset ... */
	memcpy(&kccb[old_write_offset], cmd,
	       sizeof(struct rogue_fwif_kccb_cmd));
	if (kccb_slot) {
		*kccb_slot = old_write_offset;
		/* Clear return status for this slot. */
		WRITE_ONCE(pvr_dev->kccb.rtn[old_write_offset],
			   ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE);
	}
	/* ... and made visible to the FW before the new write offset is published. */
	mb(); /* memory barrier */
	WRITE_ONCE(ctrl->write_offset, new_write_offset);
	pvr_dev->kccb.reserved_count--;

	/* Kick MTS */
	pvr_fw_mts_schedule(pvr_dev,
			    PVR_FWIF_DM_GP & ~ROGUE_CR_MTS_SCHEDULE_DM_CLRMSK);

out_unlock:
	mutex_unlock(&pvr_ccb->lock);
}
283 
284 /**
285  * pvr_kccb_try_reserve_slot() - Try to reserve a KCCB slot
286  * @pvr_dev: Device pointer.
287  *
288  * Return:
289  *  * true if a KCCB slot was reserved, or
290  *  * false otherwise.
291  */
292 static bool pvr_kccb_try_reserve_slot(struct pvr_device *pvr_dev)
293 {
294 	bool reserved = false;
295 	u32 used_count;
296 
297 	mutex_lock(&pvr_dev->kccb.ccb.lock);
298 
299 	used_count = pvr_kccb_used_slot_count_locked(pvr_dev);
300 	if (pvr_dev->kccb.reserved_count < pvr_kccb_capacity(pvr_dev) - used_count) {
301 		pvr_dev->kccb.reserved_count++;
302 		reserved = true;
303 	}
304 
305 	mutex_unlock(&pvr_dev->kccb.ccb.lock);
306 
307 	return reserved;
308 }
309 
310 /**
311  * pvr_kccb_reserve_slot_sync() - Try to reserve a slot synchronously
312  * @pvr_dev: Device pointer.
313  *
314  * Return:
315  *  * 0 on success, or
316  *  * -EBUSY if no slots were reserved after %RESERVE_SLOT_TIMEOUT, with a minimum of
317  *    %RESERVE_SLOT_MIN_RETRIES retries.
318  */
319 static int pvr_kccb_reserve_slot_sync(struct pvr_device *pvr_dev)
320 {
321 	unsigned long start_timestamp = jiffies;
322 	bool reserved = false;
323 	u32 retries = 0;
324 
325 	while (time_before(jiffies, start_timestamp + RESERVE_SLOT_TIMEOUT) ||
326 	       retries < RESERVE_SLOT_MIN_RETRIES) {
327 		reserved = pvr_kccb_try_reserve_slot(pvr_dev);
328 		if (reserved)
329 			break;
330 
331 		usleep_range(1, 50);
332 
333 		if (retries < U32_MAX)
334 			retries++;
335 	}
336 
337 	return reserved ? 0 : -EBUSY;
338 }
339 
340 /**
341  * pvr_kccb_send_cmd_powered() - Send command to the KCCB, with a PM ref held
342  * @pvr_dev: Device pointer.
343  * @cmd: Command to sent.
344  * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
345  *
346  * Returns:
347  *  * Zero on success, or
348  *  * -EBUSY if timeout while waiting for a free KCCB slot.
349  */
350 int
351 pvr_kccb_send_cmd_powered(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd,
352 			  u32 *kccb_slot)
353 {
354 	int err;
355 
356 	err = pvr_kccb_reserve_slot_sync(pvr_dev);
357 	if (err)
358 		return err;
359 
360 	pvr_kccb_send_cmd_reserved_powered(pvr_dev, cmd, kccb_slot);
361 	return 0;
362 }
363 
364 /**
365  * pvr_kccb_send_cmd() - Send command to the KCCB
366  * @pvr_dev: Device pointer.
367  * @cmd: Command to sent.
368  * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
369  *
370  * Returns:
371  *  * Zero on success, or
372  *  * -EBUSY if timeout while waiting for a free KCCB slot.
373  */
374 int
375 pvr_kccb_send_cmd(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd,
376 		  u32 *kccb_slot)
377 {
378 	int err;
379 
380 	err = pvr_power_get(pvr_dev);
381 	if (err)
382 		return err;
383 
384 	err = pvr_kccb_send_cmd_powered(pvr_dev, cmd, kccb_slot);
385 
386 	pvr_power_put(pvr_dev);
387 
388 	return err;
389 }
390 
391 /**
392  * pvr_kccb_wait_for_completion() - Wait for a KCCB command to complete
393  * @pvr_dev: Device pointer.
394  * @slot_nr: KCCB slot to wait on.
395  * @timeout: Timeout length (in jiffies).
396  * @rtn_out: Location to store KCCB command result. May be %NULL.
397  *
398  * Returns:
399  *  * Zero on success, or
400  *  * -ETIMEDOUT on timeout.
401  */
402 int
403 pvr_kccb_wait_for_completion(struct pvr_device *pvr_dev, u32 slot_nr,
404 			     u32 timeout, u32 *rtn_out)
405 {
406 	int ret = wait_event_timeout(pvr_dev->kccb.rtn_q, READ_ONCE(pvr_dev->kccb.rtn[slot_nr]) &
407 				     ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED, timeout);
408 
409 	if (ret && rtn_out)
410 		*rtn_out = READ_ONCE(pvr_dev->kccb.rtn[slot_nr]);
411 
412 	return ret ? 0 : -ETIMEDOUT;
413 }
414 
415 /**
416  * pvr_kccb_is_idle() - Returns whether the device's KCCB is idle
417  * @pvr_dev: Device pointer
418  *
419  * Returns:
420  *  * %true if the KCCB is idle (contains no commands), or
421  *  * %false if the KCCB contains pending commands.
422  */
423 bool
424 pvr_kccb_is_idle(struct pvr_device *pvr_dev)
425 {
426 	struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->kccb.ccb.ctrl;
427 	bool idle;
428 
429 	mutex_lock(&pvr_dev->kccb.ccb.lock);
430 
431 	idle = (READ_ONCE(ctrl->write_offset) == READ_ONCE(ctrl->read_offset));
432 
433 	mutex_unlock(&pvr_dev->kccb.ccb.lock);
434 
435 	return idle;
436 }
437 
/* dma_fence_ops::get_driver_name callback: report the driver name. */
static const char *
pvr_kccb_fence_get_driver_name(struct dma_fence *f)
{
	return PVR_DRIVER_NAME;
}
443 
/* dma_fence_ops::get_timeline_name callback: all KCCB fences share one timeline. */
static const char *
pvr_kccb_fence_get_timeline_name(struct dma_fence *f)
{
	return "kccb";
}
449 
/* Minimal fence ops for KCCB slot-wait fences; default signalling behaviour. */
static const struct dma_fence_ops pvr_kccb_fence_ops = {
	.get_driver_name = pvr_kccb_fence_get_driver_name,
	.get_timeline_name = pvr_kccb_fence_get_timeline_name,
};
454 
/**
 * struct pvr_kccb_fence - Fence object used to wait for a KCCB slot
 *
 * Signalled by pvr_kccb_wake_up_waiters() once a slot can be reserved.
 */
struct pvr_kccb_fence {
	/** @base: Base dma_fence object. */
	struct dma_fence base;

	/** @node: Node used to insert the fence in the pvr_device::kccb::waiters list. */
	struct list_head node;
};
465 
/**
 * pvr_kccb_wake_up_waiters() - Check the KCCB waiters
 * @pvr_dev: Target PowerVR device
 *
 * Signal as many KCCB fences as we have slots available.
 */
void pvr_kccb_wake_up_waiters(struct pvr_device *pvr_dev)
{
	struct pvr_kccb_fence *fence, *tmp_fence;
	u32 used_count, available_count;

	/* Wake up those waiting for KCCB slot execution. */
	wake_up_all(&pvr_dev->kccb.rtn_q);

	/* Then iterate over all KCCB fences and signal as many as we can. */
	mutex_lock(&pvr_dev->kccb.ccb.lock);
	used_count = pvr_kccb_used_slot_count_locked(pvr_dev);

	/* Accounting invariant: used + reserved must never exceed capacity. */
	if (WARN_ON(used_count + pvr_dev->kccb.reserved_count > pvr_kccb_capacity(pvr_dev)))
		goto out_unlock;

	available_count = pvr_kccb_capacity(pvr_dev) - used_count - pvr_dev->kccb.reserved_count;
	list_for_each_entry_safe(fence, tmp_fence, &pvr_dev->kccb.waiters, node) {
		if (!available_count)
			break;

		/*
		 * Each signalled waiter gets its slot reserved here; the
		 * reference taken in pvr_kccb_reserve_slot() is dropped after
		 * signalling.
		 */
		list_del(&fence->node);
		pvr_dev->kccb.reserved_count++;
		available_count--;
		dma_fence_signal(&fence->base);
		dma_fence_put(&fence->base);
	}

out_unlock:
	mutex_unlock(&pvr_dev->kccb.ccb.lock);
}
502 
/**
 * pvr_kccb_fini() - Cleanup device KCCB
 * @pvr_dev: Target PowerVR device
 *
 * The WARN_ONs flag leaked slot reservations or fence waiters left behind by
 * callers at teardown time.
 */
void pvr_kccb_fini(struct pvr_device *pvr_dev)
{
	pvr_ccb_fini(&pvr_dev->kccb.ccb);
	WARN_ON(!list_empty(&pvr_dev->kccb.waiters));
	WARN_ON(pvr_dev->kccb.reserved_count);
}
513 
514 /**
515  * pvr_kccb_init() - Initialise device KCCB
516  * @pvr_dev: Target PowerVR device
517  *
518  * Returns:
519  *  * 0 on success, or
520  *  * Any error returned by pvr_ccb_init().
521  */
522 int
523 pvr_kccb_init(struct pvr_device *pvr_dev)
524 {
525 	pvr_dev->kccb.slot_count = 1 << ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
526 	INIT_LIST_HEAD(&pvr_dev->kccb.waiters);
527 	pvr_dev->kccb.fence_ctx.id = dma_fence_context_alloc(1);
528 	spin_lock_init(&pvr_dev->kccb.fence_ctx.lock);
529 
530 	return pvr_ccb_init(pvr_dev, &pvr_dev->kccb.ccb,
531 			    ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT,
532 			    sizeof(struct rogue_fwif_kccb_cmd));
533 }
534 
535 /**
536  * pvr_kccb_fence_alloc() - Allocate a pvr_kccb_fence object
537  *
538  * Return:
539  *  * NULL if the allocation fails, or
540  *  * A valid dma_fence pointer otherwise.
541  */
542 struct dma_fence *pvr_kccb_fence_alloc(void)
543 {
544 	struct pvr_kccb_fence *kccb_fence;
545 
546 	kccb_fence = kzalloc(sizeof(*kccb_fence), GFP_KERNEL);
547 	if (!kccb_fence)
548 		return NULL;
549 
550 	return &kccb_fence->base;
551 }
552 
553 /**
554  * pvr_kccb_fence_put() - Drop a KCCB fence reference
555  * @fence: The fence to drop the reference on.
556  *
557  * If the fence hasn't been initialized yet, dma_fence_free() is called. This
558  * way we have a single function taking care of both cases.
559  */
560 void pvr_kccb_fence_put(struct dma_fence *fence)
561 {
562 	if (!fence)
563 		return;
564 
565 	if (!fence->ops) {
566 		dma_fence_free(fence);
567 	} else {
568 		WARN_ON(fence->ops != &pvr_kccb_fence_ops);
569 		dma_fence_put(fence);
570 	}
571 }
572 
/**
 * pvr_kccb_reserve_slot() - Reserve a KCCB slot for later use
 * @pvr_dev: Target PowerVR device
 * @f: KCCB fence object previously allocated with pvr_kccb_fence_alloc()
 *
 * Try to reserve a KCCB slot, and if there's no slot available,
 * initializes the fence object and queue it to the waiters list.
 *
 * If NULL is returned, that means the slot is reserved. In that case,
 * the @f is freed and shouldn't be accessed after that point.
 *
 * Return:
 *  * NULL if a slot was available directly, or
 *  * A valid dma_fence object to wait on if no slot was available.
 */
struct dma_fence *
pvr_kccb_reserve_slot(struct pvr_device *pvr_dev, struct dma_fence *f)
{
	struct pvr_kccb_fence *fence = container_of(f, struct pvr_kccb_fence, base);
	struct dma_fence *out_fence = NULL;
	u32 used_count;

	mutex_lock(&pvr_dev->kccb.ccb.lock);

	used_count = pvr_kccb_used_slot_count_locked(pvr_dev);
	if (pvr_dev->kccb.reserved_count >= pvr_kccb_capacity(pvr_dev) - used_count) {
		/*
		 * No capacity left: initialise the fence now and park it on
		 * the waiters list. pvr_kccb_wake_up_waiters() signals it and
		 * drops the list's reference once a slot frees up; the extra
		 * dma_fence_get() below is the caller's reference.
		 */
		dma_fence_init(&fence->base, &pvr_kccb_fence_ops,
			       &pvr_dev->kccb.fence_ctx.lock,
			       pvr_dev->kccb.fence_ctx.id,
			       atomic_inc_return(&pvr_dev->kccb.fence_ctx.seqno));
		out_fence = dma_fence_get(&fence->base);
		list_add_tail(&fence->node, &pvr_dev->kccb.waiters);
	} else {
		/* Slot available immediately: the fence is unused, free it. */
		pvr_kccb_fence_put(f);
		pvr_dev->kccb.reserved_count++;
	}

	mutex_unlock(&pvr_dev->kccb.ccb.lock);

	return out_fence;
}
614 
615 /**
616  * pvr_kccb_release_slot() - Release a KCCB slot reserved with
617  * pvr_kccb_reserve_slot()
618  * @pvr_dev: Target PowerVR device
619  *
620  * Should only be called if something failed after the
621  * pvr_kccb_reserve_slot() call and you know you won't call
622  * pvr_kccb_send_cmd_reserved().
623  */
624 void pvr_kccb_release_slot(struct pvr_device *pvr_dev)
625 {
626 	mutex_lock(&pvr_dev->kccb.ccb.lock);
627 	if (!WARN_ON(!pvr_dev->kccb.reserved_count))
628 		pvr_dev->kccb.reserved_count--;
629 	mutex_unlock(&pvr_dev->kccb.ccb.lock);
630 }
631 
/**
 * pvr_fwccb_init() - Initialise device FWCCB
 * @pvr_dev: Target PowerVR device
 *
 * The FWCCB carries firmware-to-host commands; see pvr_fwccb_process().
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_ccb_init().
 */
int
pvr_fwccb_init(struct pvr_device *pvr_dev)
{
	return pvr_ccb_init(pvr_dev, &pvr_dev->fwccb,
			    ROGUE_FWIF_FWCCB_NUMCMDS_LOG2,
			    sizeof(struct rogue_fwif_fwccb_cmd));
}
647