xref: /linux/drivers/gpu/drm/imagination/pvr_ccb.c (revision 5ea5b6ff0d63aef1dc3fb25445acea183f61a934)
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /* Copyright (c) 2023 Imagination Technologies Ltd. */
3 
4 #include "pvr_ccb.h"
5 #include "pvr_device.h"
6 #include "pvr_drv.h"
7 #include "pvr_free_list.h"
8 #include "pvr_fw.h"
9 #include "pvr_gem.h"
10 #include "pvr_power.h"
11 
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
21 
22 #define RESERVE_SLOT_TIMEOUT (1 * HZ) /* 1s */
23 #define RESERVE_SLOT_MIN_RETRIES 10
24 
25 static void
26 ccb_ctrl_init(void *cpu_ptr, void *priv)
27 {
28 	struct rogue_fwif_ccb_ctl *ctrl = cpu_ptr;
29 	struct pvr_ccb *pvr_ccb = priv;
30 
31 	ctrl->write_offset = 0;
32 	ctrl->read_offset = 0;
33 	ctrl->wrap_mask = pvr_ccb->num_cmds - 1;
34 	ctrl->cmd_size = pvr_ccb->cmd_size;
35 }
36 
37 /**
38  * pvr_ccb_init() - Initialise a CCB
39  * @pvr_dev: Device pointer.
40  * @pvr_ccb: Pointer to CCB structure to initialise.
41  * @num_cmds_log2: Log2 of number of commands in this CCB.
42  * @cmd_size: Command size for this CCB.
43  *
44  * Return:
45  *  * Zero on success, or
46  *  * Any error code returned by pvr_fw_object_create_and_map().
47  */
48 static int
49 pvr_ccb_init(struct pvr_device *pvr_dev, struct pvr_ccb *pvr_ccb,
50 	     u32 num_cmds_log2, size_t cmd_size)
51 {
52 	u32 num_cmds = 1 << num_cmds_log2;
53 	u32 ccb_size = num_cmds * cmd_size;
54 	int err;
55 
56 	pvr_ccb->num_cmds = num_cmds;
57 	pvr_ccb->cmd_size = cmd_size;
58 
59 	err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_ccb->lock);
60 	if (err)
61 		return err;
62 
63 	/*
64 	 * Map CCB and control structure as uncached, so we don't have to flush
65 	 * CPU cache repeatedly when polling for space.
66 	 */
67 	pvr_ccb->ctrl = pvr_fw_object_create_and_map(pvr_dev, sizeof(*pvr_ccb->ctrl),
68 						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
69 						     ccb_ctrl_init, pvr_ccb, &pvr_ccb->ctrl_obj);
70 	if (IS_ERR(pvr_ccb->ctrl))
71 		return PTR_ERR(pvr_ccb->ctrl);
72 
73 	pvr_ccb->ccb = pvr_fw_object_create_and_map(pvr_dev, ccb_size,
74 						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
75 						    NULL, NULL, &pvr_ccb->ccb_obj);
76 	if (IS_ERR(pvr_ccb->ccb)) {
77 		err = PTR_ERR(pvr_ccb->ccb);
78 		goto err_free_ctrl;
79 	}
80 
81 	pvr_fw_object_get_fw_addr(pvr_ccb->ctrl_obj, &pvr_ccb->ctrl_fw_addr);
82 	pvr_fw_object_get_fw_addr(pvr_ccb->ccb_obj, &pvr_ccb->ccb_fw_addr);
83 
84 	WRITE_ONCE(pvr_ccb->ctrl->write_offset, 0);
85 	WRITE_ONCE(pvr_ccb->ctrl->read_offset, 0);
86 	WRITE_ONCE(pvr_ccb->ctrl->wrap_mask, num_cmds - 1);
87 	WRITE_ONCE(pvr_ccb->ctrl->cmd_size, cmd_size);
88 
89 	return 0;
90 
91 err_free_ctrl:
92 	pvr_fw_object_unmap_and_destroy(pvr_ccb->ctrl_obj);
93 
94 	return err;
95 }
96 
/**
 * pvr_ccb_fini() - Release CCB structure
 * @pvr_ccb: CCB to release.
 *
 * Tears down both FW objects created by pvr_ccb_init(): the command ring
 * first, then the control structure. The lock is DRM-managed
 * (drmm_mutex_init()) and needs no explicit cleanup here.
 */
void
pvr_ccb_fini(struct pvr_ccb *pvr_ccb)
{
	pvr_fw_object_unmap_and_destroy(pvr_ccb->ccb_obj);
	pvr_fw_object_unmap_and_destroy(pvr_ccb->ctrl_obj);
}
107 
108 /**
109  * pvr_ccb_slot_available_locked() - Test whether any slots are available in CCB
110  * @pvr_ccb: CCB to test.
111  * @write_offset: Address to store number of next available slot. May be %NULL.
112  *
113  * Caller must hold @pvr_ccb->lock.
114  *
115  * Return:
116  *  * %true if a slot is available, or
117  *  * %false if no slot is available.
118  */
119 static __always_inline bool
120 pvr_ccb_slot_available_locked(struct pvr_ccb *pvr_ccb, u32 *write_offset)
121 {
122 	struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
123 	u32 next_write_offset = (READ_ONCE(ctrl->write_offset) + 1) & READ_ONCE(ctrl->wrap_mask);
124 
125 	lockdep_assert_held(&pvr_ccb->lock);
126 
127 	if (READ_ONCE(ctrl->read_offset) != next_write_offset) {
128 		if (write_offset)
129 			*write_offset = next_write_offset;
130 		return true;
131 	}
132 
133 	return false;
134 }
135 
136 static void
137 process_fwccb_command(struct pvr_device *pvr_dev, struct rogue_fwif_fwccb_cmd *cmd)
138 {
139 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
140 
141 	if ((cmd->cmd_type & ROGUE_CMD_MAGIC_DWORD_MASK) != ROGUE_CMD_MAGIC_DWORD_SHIFTED) {
142 		drm_warn_once(drm_dev, "Received FWCCB command with bad magic value; ignoring (type=0x%08x)\n",
143 			      cmd->cmd_type);
144 		return;
145 	}
146 
147 	switch (cmd->cmd_type) {
148 	case ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART:
149 		pvr_power_reset(pvr_dev, false);
150 		break;
151 
152 	case ROGUE_FWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION:
153 		pvr_free_list_process_reconstruct_req(pvr_dev,
154 						      &cmd->cmd_data.cmd_freelists_reconstruction);
155 		break;
156 
157 	case ROGUE_FWIF_FWCCB_CMD_FREELIST_GROW:
158 		pvr_free_list_process_grow_req(pvr_dev, &cmd->cmd_data.cmd_free_list_gs);
159 		break;
160 
161 	case ROGUE_FWIF_FWCCB_CMD_UPDATE_STATS:
162 		/*
163 		 * We currently have no infrastructure for processing these
164 		 * stats. It may be added in the future, but for now just
165 		 * suppress the "unknown" warning when receiving this command.
166 		 */
167 		break;
168 
169 	default:
170 		drm_info(drm_dev, "Received unknown FWCCB command (type=%d)\n",
171 			 cmd->cmd_type & ~ROGUE_CMD_MAGIC_DWORD_MASK);
172 		break;
173 	}
174 }
175 
/**
 * pvr_fwccb_process() - Process any pending FWCCB commands
 * @pvr_dev: Target PowerVR device
 *
 * Drains the firmware-to-kernel CCB: each pending command is copied out of
 * the ring, the read offset is advanced to release the slot back to the
 * firmware, and the command is then handled with the FWCCB lock dropped so
 * handlers may sleep or take other locks.
 */
void pvr_fwccb_process(struct pvr_device *pvr_dev)
{
	struct rogue_fwif_fwccb_cmd *fwccb = pvr_dev->fwccb.ccb;
	struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->fwccb.ctrl;
	u32 read_offset;

	mutex_lock(&pvr_dev->fwccb.lock);

	while ((read_offset = READ_ONCE(ctrl->read_offset)) != READ_ONCE(ctrl->write_offset)) {
		/* Copy the command out of the (uncached) ring before the slot
		 * is handed back to the firmware below.
		 */
		struct rogue_fwif_fwccb_cmd cmd = fwccb[read_offset];

		WRITE_ONCE(ctrl->read_offset, (read_offset + 1) & READ_ONCE(ctrl->wrap_mask));

		/* Drop FWCCB lock while we process command. */
		mutex_unlock(&pvr_dev->fwccb.lock);

		process_fwccb_command(pvr_dev, &cmd);

		mutex_lock(&pvr_dev->fwccb.lock);
	}

	mutex_unlock(&pvr_dev->fwccb.lock);
}
203 
204 /**
205  * pvr_kccb_capacity() - Returns the maximum number of usable KCCB slots.
206  * @pvr_dev: Target PowerVR device
207  *
208  * Return:
209  *  * The maximum number of active slots.
210  */
211 static u32 pvr_kccb_capacity(struct pvr_device *pvr_dev)
212 {
213 	/* Capacity is the number of slot minus one to cope with the wrapping
214 	 * mechanisms. If we were to use all slots, we might end up with
215 	 * read_offset == write_offset, which the FW considers as a KCCB-is-empty
216 	 * condition.
217 	 */
218 	return pvr_dev->kccb.slot_count - 1;
219 }
220 
221 /**
222  * pvr_kccb_used_slot_count_locked() - Get the number of used slots
223  * @pvr_dev: Device pointer.
224  *
225  * KCCB lock must be held.
226  *
227  * Return:
228  *  * The number of slots currently used.
229  */
230 static u32
231 pvr_kccb_used_slot_count_locked(struct pvr_device *pvr_dev)
232 {
233 	struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb;
234 	struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
235 	u32 wr_offset = READ_ONCE(ctrl->write_offset);
236 	u32 rd_offset = READ_ONCE(ctrl->read_offset);
237 	u32 used_count;
238 
239 	lockdep_assert_held(&pvr_ccb->lock);
240 
241 	if (wr_offset >= rd_offset)
242 		used_count = wr_offset - rd_offset;
243 	else
244 		used_count = wr_offset + pvr_dev->kccb.slot_count - rd_offset;
245 
246 	return used_count;
247 }
248 
/**
 * pvr_kccb_send_cmd_reserved_powered() - Send command to the KCCB, with the PM ref
 * held and a slot pre-reserved
 * @pvr_dev: Device pointer.
 * @cmd: Command to sent.
 * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
 *
 * A slot must have been reserved beforehand (pvr_kccb_reserve_slot() or
 * pvr_kccb_try_reserve_slot()); this call consumes one reservation.
 */
void
pvr_kccb_send_cmd_reserved_powered(struct pvr_device *pvr_dev,
				   struct rogue_fwif_kccb_cmd *cmd,
				   u32 *kccb_slot)
{
	struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb;
	struct rogue_fwif_kccb_cmd *kccb = pvr_ccb->ccb;
	struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
	u32 old_write_offset;
	u32 new_write_offset;

	/* Submitting to a device marked lost indicates a driver bug. */
	WARN_ON(pvr_dev->lost);

	mutex_lock(&pvr_ccb->lock);

	/* Sending without an outstanding reservation is a caller bug. */
	if (WARN_ON(!pvr_dev->kccb.reserved_count))
		goto out_unlock;

	old_write_offset = READ_ONCE(ctrl->write_offset);

	/* We reserved the slot, we should have one available. */
	if (WARN_ON(!pvr_ccb_slot_available_locked(pvr_ccb, &new_write_offset)))
		goto out_unlock;

	memcpy(&kccb[old_write_offset], cmd,
	       sizeof(struct rogue_fwif_kccb_cmd));
	if (kccb_slot) {
		*kccb_slot = old_write_offset;
		/* Clear return status for this slot. */
		WRITE_ONCE(pvr_dev->kccb.rtn[old_write_offset],
			   ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE);
	}
	/* The barrier ensures the command body and cleared return status are
	 * visible before the new write offset is published to the firmware.
	 */
	mb(); /* memory barrier */
	WRITE_ONCE(ctrl->write_offset, new_write_offset);
	pvr_dev->kccb.reserved_count--;

	/* Kick MTS */
	pvr_fw_mts_schedule(pvr_dev,
			    PVR_FWIF_DM_GP & ~ROGUE_CR_MTS_SCHEDULE_DM_CLRMSK);

out_unlock:
	mutex_unlock(&pvr_ccb->lock);
}
299 
300 /**
301  * pvr_kccb_try_reserve_slot() - Try to reserve a KCCB slot
302  * @pvr_dev: Device pointer.
303  *
304  * Return:
305  *  * true if a KCCB slot was reserved, or
306  *  * false otherwise.
307  */
308 static bool pvr_kccb_try_reserve_slot(struct pvr_device *pvr_dev)
309 {
310 	bool reserved = false;
311 	u32 used_count;
312 
313 	mutex_lock(&pvr_dev->kccb.ccb.lock);
314 
315 	used_count = pvr_kccb_used_slot_count_locked(pvr_dev);
316 	if (pvr_dev->kccb.reserved_count < pvr_kccb_capacity(pvr_dev) - used_count) {
317 		pvr_dev->kccb.reserved_count++;
318 		reserved = true;
319 	}
320 
321 	mutex_unlock(&pvr_dev->kccb.ccb.lock);
322 
323 	return reserved;
324 }
325 
326 /**
327  * pvr_kccb_reserve_slot_sync() - Try to reserve a slot synchronously
328  * @pvr_dev: Device pointer.
329  *
330  * Return:
331  *  * 0 on success, or
332  *  * -EBUSY if no slots were reserved after %RESERVE_SLOT_TIMEOUT, with a minimum of
333  *    %RESERVE_SLOT_MIN_RETRIES retries.
334  */
335 static int pvr_kccb_reserve_slot_sync(struct pvr_device *pvr_dev)
336 {
337 	unsigned long start_timestamp = jiffies;
338 	bool reserved = false;
339 	u32 retries = 0;
340 
341 	while (time_before(jiffies, start_timestamp + RESERVE_SLOT_TIMEOUT) ||
342 	       retries < RESERVE_SLOT_MIN_RETRIES) {
343 		reserved = pvr_kccb_try_reserve_slot(pvr_dev);
344 		if (reserved)
345 			break;
346 
347 		usleep_range(1, 50);
348 
349 		if (retries < U32_MAX)
350 			retries++;
351 	}
352 
353 	return reserved ? 0 : -EBUSY;
354 }
355 
356 /**
357  * pvr_kccb_send_cmd_powered() - Send command to the KCCB, with a PM ref held
358  * @pvr_dev: Device pointer.
359  * @cmd: Command to sent.
360  * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
361  *
362  * Returns:
363  *  * Zero on success, or
364  *  * -EBUSY if timeout while waiting for a free KCCB slot.
365  */
366 int
367 pvr_kccb_send_cmd_powered(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd,
368 			  u32 *kccb_slot)
369 {
370 	int err;
371 
372 	err = pvr_kccb_reserve_slot_sync(pvr_dev);
373 	if (err)
374 		return err;
375 
376 	pvr_kccb_send_cmd_reserved_powered(pvr_dev, cmd, kccb_slot);
377 	return 0;
378 }
379 
380 /**
381  * pvr_kccb_send_cmd() - Send command to the KCCB
382  * @pvr_dev: Device pointer.
383  * @cmd: Command to sent.
384  * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
385  *
386  * Returns:
387  *  * Zero on success, or
388  *  * -EBUSY if timeout while waiting for a free KCCB slot.
389  */
390 int
391 pvr_kccb_send_cmd(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd,
392 		  u32 *kccb_slot)
393 {
394 	int err;
395 
396 	err = pvr_power_get(pvr_dev);
397 	if (err)
398 		return err;
399 
400 	err = pvr_kccb_send_cmd_powered(pvr_dev, cmd, kccb_slot);
401 
402 	pvr_power_put(pvr_dev);
403 
404 	return err;
405 }
406 
407 /**
408  * pvr_kccb_wait_for_completion() - Wait for a KCCB command to complete
409  * @pvr_dev: Device pointer.
410  * @slot_nr: KCCB slot to wait on.
411  * @timeout: Timeout length (in jiffies).
412  * @rtn_out: Location to store KCCB command result. May be %NULL.
413  *
414  * Returns:
415  *  * Zero on success, or
416  *  * -ETIMEDOUT on timeout.
417  */
418 int
419 pvr_kccb_wait_for_completion(struct pvr_device *pvr_dev, u32 slot_nr,
420 			     u32 timeout, u32 *rtn_out)
421 {
422 	int ret = wait_event_timeout(pvr_dev->kccb.rtn_q, READ_ONCE(pvr_dev->kccb.rtn[slot_nr]) &
423 				     ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED, timeout);
424 
425 	if (ret && rtn_out)
426 		*rtn_out = READ_ONCE(pvr_dev->kccb.rtn[slot_nr]);
427 
428 	return ret ? 0 : -ETIMEDOUT;
429 }
430 
431 /**
432  * pvr_kccb_is_idle() - Returns whether the device's KCCB is idle
433  * @pvr_dev: Device pointer
434  *
435  * Returns:
436  *  * %true if the KCCB is idle (contains no commands), or
437  *  * %false if the KCCB contains pending commands.
438  */
439 bool
440 pvr_kccb_is_idle(struct pvr_device *pvr_dev)
441 {
442 	struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->kccb.ccb.ctrl;
443 	bool idle;
444 
445 	mutex_lock(&pvr_dev->kccb.ccb.lock);
446 
447 	idle = (READ_ONCE(ctrl->write_offset) == READ_ONCE(ctrl->read_offset));
448 
449 	mutex_unlock(&pvr_dev->kccb.ccb.lock);
450 
451 	return idle;
452 }
453 
/* dma_fence_ops::get_driver_name implementation for KCCB fences. */
static const char *
pvr_kccb_fence_get_driver_name(struct dma_fence *f)
{
	return PVR_DRIVER_NAME;
}
459 
/* dma_fence_ops::get_timeline_name implementation for KCCB fences. */
static const char *
pvr_kccb_fence_get_timeline_name(struct dma_fence *f)
{
	return "kccb";
}
465 
/* Minimal fence ops: KCCB fences only need identification callbacks; the
 * dma_fence core's default wait/signal behaviour is sufficient.
 */
static const struct dma_fence_ops pvr_kccb_fence_ops = {
	.get_driver_name = pvr_kccb_fence_get_driver_name,
	.get_timeline_name = pvr_kccb_fence_get_timeline_name,
};
470 
/**
 * struct pvr_kccb_fence - Fence object used to wait for a KCCB slot
 *
 * Allocated by pvr_kccb_fence_alloc() and only initialised lazily, when
 * no slot is immediately available at reservation time.
 */
struct pvr_kccb_fence {
	/** @base: Base dma_fence object. */
	struct dma_fence base;

	/** @node: Node used to insert the fence in the pvr_device::kccb::waiters list. */
	struct list_head node;
};
481 
/**
 * pvr_kccb_wake_up_waiters() - Check the KCCB waiters
 * @pvr_dev: Target PowerVR device
 *
 * Signal as many KCCB fences as we have slots available.
 */
void pvr_kccb_wake_up_waiters(struct pvr_device *pvr_dev)
{
	struct pvr_kccb_fence *fence, *tmp_fence;
	u32 used_count, available_count;

	/* Wake up those waiting for KCCB slot execution. */
	wake_up_all(&pvr_dev->kccb.rtn_q);

	/* Then iterate over all KCCB fences and signal as many as we can. */
	mutex_lock(&pvr_dev->kccb.ccb.lock);
	used_count = pvr_kccb_used_slot_count_locked(pvr_dev);

	/* More reservations than capacity means the accounting went wrong
	 * somewhere; bail out rather than computing an underflowed
	 * available_count below.
	 */
	if (WARN_ON(used_count + pvr_dev->kccb.reserved_count > pvr_kccb_capacity(pvr_dev)))
		goto out_unlock;

	available_count = pvr_kccb_capacity(pvr_dev) - used_count - pvr_dev->kccb.reserved_count;
	list_for_each_entry_safe(fence, tmp_fence, &pvr_dev->kccb.waiters, node) {
		if (!available_count)
			break;

		/* Each signalled waiter is granted one reservation; the final
		 * put drops the reference taken when the fence was queued in
		 * pvr_kccb_reserve_slot().
		 */
		list_del(&fence->node);
		pvr_dev->kccb.reserved_count++;
		available_count--;
		dma_fence_signal(&fence->base);
		dma_fence_put(&fence->base);
	}

out_unlock:
	mutex_unlock(&pvr_dev->kccb.ccb.lock);
}
518 
/**
 * pvr_kccb_fini() - Cleanup device KCCB
 * @pvr_dev: Target PowerVR device
 */
void pvr_kccb_fini(struct pvr_device *pvr_dev)
{
	pvr_ccb_fini(&pvr_dev->kccb.ccb);
	/* By this point all users should be gone: nobody waiting for a slot
	 * and no outstanding reservations.
	 */
	WARN_ON(!list_empty(&pvr_dev->kccb.waiters));
	WARN_ON(pvr_dev->kccb.reserved_count);
}
529 
530 /**
531  * pvr_kccb_init() - Initialise device KCCB
532  * @pvr_dev: Target PowerVR device
533  *
534  * Returns:
535  *  * 0 on success, or
536  *  * Any error returned by pvr_ccb_init().
537  */
538 int
539 pvr_kccb_init(struct pvr_device *pvr_dev)
540 {
541 	pvr_dev->kccb.slot_count = 1 << ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
542 	INIT_LIST_HEAD(&pvr_dev->kccb.waiters);
543 	pvr_dev->kccb.fence_ctx.id = dma_fence_context_alloc(1);
544 	spin_lock_init(&pvr_dev->kccb.fence_ctx.lock);
545 
546 	return pvr_ccb_init(pvr_dev, &pvr_dev->kccb.ccb,
547 			    ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT,
548 			    sizeof(struct rogue_fwif_kccb_cmd));
549 }
550 
551 /**
552  * pvr_kccb_fence_alloc() - Allocate a pvr_kccb_fence object
553  *
554  * Return:
555  *  * NULL if the allocation fails, or
556  *  * A valid dma_fence pointer otherwise.
557  */
558 struct dma_fence *pvr_kccb_fence_alloc(void)
559 {
560 	struct pvr_kccb_fence *kccb_fence;
561 
562 	kccb_fence = kzalloc_obj(*kccb_fence);
563 	if (!kccb_fence)
564 		return NULL;
565 
566 	return &kccb_fence->base;
567 }
568 
569 /**
570  * pvr_kccb_fence_put() - Drop a KCCB fence reference
571  * @fence: The fence to drop the reference on.
572  *
573  * If the fence hasn't been initialized yet, dma_fence_free() is called. This
574  * way we have a single function taking care of both cases.
575  */
576 void pvr_kccb_fence_put(struct dma_fence *fence)
577 {
578 	if (!fence)
579 		return;
580 
581 	if (!fence->ops) {
582 		dma_fence_free(fence);
583 	} else {
584 		WARN_ON(fence->ops != &pvr_kccb_fence_ops);
585 		dma_fence_put(fence);
586 	}
587 }
588 
/**
 * pvr_kccb_reserve_slot() - Reserve a KCCB slot for later use
 * @pvr_dev: Target PowerVR device
 * @f: KCCB fence object previously allocated with pvr_kccb_fence_alloc()
 *
 * Try to reserve a KCCB slot, and if there's no slot available,
 * initializes the fence object and queue it to the waiters list.
 *
 * If NULL is returned, that means the slot is reserved. In that case,
 * the @f is freed and shouldn't be accessed after that point.
 *
 * Return:
 *  * NULL if a slot was available directly, or
 *  * A valid dma_fence object to wait on if no slot was available.
 */
struct dma_fence *
pvr_kccb_reserve_slot(struct pvr_device *pvr_dev, struct dma_fence *f)
{
	struct pvr_kccb_fence *fence = container_of(f, struct pvr_kccb_fence, base);
	struct dma_fence *out_fence = NULL;
	u32 used_count;

	mutex_lock(&pvr_dev->kccb.ccb.lock);

	used_count = pvr_kccb_used_slot_count_locked(pvr_dev);
	if (pvr_dev->kccb.reserved_count >= pvr_kccb_capacity(pvr_dev) - used_count) {
		/* No slot free: lazily initialize the pre-allocated fence and
		 * park it on the waiters list. pvr_kccb_wake_up_waiters() will
		 * signal it once a reservation becomes available, and drops
		 * the extra reference taken here.
		 */
		dma_fence_init(&fence->base, &pvr_kccb_fence_ops,
			       &pvr_dev->kccb.fence_ctx.lock,
			       pvr_dev->kccb.fence_ctx.id,
			       atomic_inc_return(&pvr_dev->kccb.fence_ctx.seqno));
		out_fence = dma_fence_get(&fence->base);
		list_add_tail(&fence->node, &pvr_dev->kccb.waiters);
	} else {
		/* Slot available: take the reservation immediately. The fence
		 * was never initialized, so release it via
		 * pvr_kccb_fence_put(), which handles that case.
		 */
		pvr_kccb_fence_put(f);
		pvr_dev->kccb.reserved_count++;
	}

	mutex_unlock(&pvr_dev->kccb.ccb.lock);

	return out_fence;
}
630 
631 /**
632  * pvr_kccb_release_slot() - Release a KCCB slot reserved with
633  * pvr_kccb_reserve_slot()
634  * @pvr_dev: Target PowerVR device
635  *
636  * Should only be called if something failed after the
637  * pvr_kccb_reserve_slot() call and you know you won't call
638  * pvr_kccb_send_cmd_reserved().
639  */
640 void pvr_kccb_release_slot(struct pvr_device *pvr_dev)
641 {
642 	mutex_lock(&pvr_dev->kccb.ccb.lock);
643 	if (!WARN_ON(!pvr_dev->kccb.reserved_count))
644 		pvr_dev->kccb.reserved_count--;
645 	mutex_unlock(&pvr_dev->kccb.ccb.lock);
646 }
647 
/**
 * pvr_fwccb_init() - Initialise device FWCCB
 * @pvr_dev: Target PowerVR device
 *
 * The FWCCB is the firmware-to-kernel command buffer; it reuses the generic
 * CCB setup in pvr_ccb_init(), sized for FWCCB commands.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_ccb_init().
 */
int
pvr_fwccb_init(struct pvr_device *pvr_dev)
{
	return pvr_ccb_init(pvr_dev, &pvr_dev->fwccb,
			    ROGUE_FWIF_FWCCB_NUMCMDS_LOG2,
			    sizeof(struct rogue_fwif_fwccb_cmd));
}
663