xref: /linux/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
3 
4 #ifndef _HINIC3_QUEUE_COMMON_H_
5 #define _HINIC3_QUEUE_COMMON_H_
6 
7 #include <linux/types.h>
8 
9 #include "hinic3_common.h"
10 
11 struct hinic3_hwdev;
12 
/* Descriptor queue backed by an array of DMA-coherent pages. Element and
 * page sizes are stored as shifts so index math in get_q_element() reduces
 * to shifts and masks.
 */
struct hinic3_queue_pages {
	/* Array of DMA-able pages that actually holds the queue entries. */
	struct hinic3_dma_addr_align  *pages;
	/* Page size in bytes. */
	u32                           page_size;
	/* Number of pages, must be power of 2. */
	u16                           num_pages;
	/* log2 of the size of one queue element in bytes. */
	u8                            elem_size_shift;
	/* log2 of the number of elements per page; elements per page must be
	 * a power of 2 for the mask arithmetic in get_q_element() to work.
	 */
	u8                            elem_per_pg_shift;
};
23 
/* Initialize @qpages bookkeeping (sizes/shifts) for a queue of @q_depth
 * elements of @elem_size bytes split across pages of @page_size bytes.
 * NOTE(review): presumably does not allocate memory itself — allocation
 * appears to be hinic3_queue_pages_alloc()'s job; confirm in the .c file.
 */
void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
			     u32 page_size, u32 elem_size);
/* Allocate the DMA-able pages described by @qpages with the given byte
 * @align. Returns 0 on success or a negative errno on failure (kernel
 * convention — TODO confirm against the definition).
 */
int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev,
			     struct hinic3_queue_pages *qpages, u32 align);
/* Release the pages previously obtained via hinic3_queue_pages_alloc(). */
void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev,
			     struct hinic3_queue_pages *qpages);
30 
31 /* Get pointer to queue entry at the specified index. Index does not have to be
32  * masked to queue depth, only least significant bits will be used. Also
33  * provides remaining elements in same page (including the first one) in case
34  * caller needs multiple entries.
35  */
36 static inline void *get_q_element(const struct hinic3_queue_pages *qpages,
37 				  u32 idx, u32 *remaining_in_page)
38 {
39 	const struct hinic3_dma_addr_align *page;
40 	u32 page_idx, elem_idx, elem_per_pg, ofs;
41 	u8 shift;
42 
43 	shift = qpages->elem_per_pg_shift;
44 	page_idx = (idx >> shift) & (qpages->num_pages - 1);
45 	elem_per_pg = 1 << shift;
46 	elem_idx = idx & (elem_per_pg - 1);
47 	if (remaining_in_page)
48 		*remaining_in_page = elem_per_pg - elem_idx;
49 	ofs = elem_idx << qpages->elem_size_shift;
50 	page = qpages->pages + page_idx;
51 	return (char *)page->align_vaddr + ofs;
52 }
53 
54 #endif
55