/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */

#ifndef _HINIC3_QUEUE_COMMON_H_
#define _HINIC3_QUEUE_COMMON_H_

#include <linux/types.h>

#include "hinic3_common.h"

struct hinic3_hwdev;

struct hinic3_queue_pages {
	/* Array of DMA-able pages that actually holds the queue entries. */
	struct hinic3_dma_addr_align *pages;
	/* Page size in bytes. */
	u32 page_size;
	/* Number of pages, must be a power of 2. */
	u16 num_pages;
	/* Log2 of the element size in bytes. */
	u8 elem_size_shift;
	/* Log2 of the number of elements per page. */
	u8 elem_per_pg_shift;
};

void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
			     u32 page_size, u32 elem_size);
int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev,
			     struct hinic3_queue_pages *qpages, u32 align);
void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev,
			     struct hinic3_queue_pages *qpages);

/* Get a pointer to the queue entry at the specified index. The index does not
 * have to be masked to the queue depth; only the least significant bits are
 * used. Also reports the number of elements remaining in the same page
 * (including the requested one) in case the caller needs multiple entries.
 */
static inline void *get_q_element(const struct hinic3_queue_pages *qpages,
				  u32 idx, u32 *remaining_in_page)
{
	const struct hinic3_dma_addr_align *page;
	u32 page_idx, elem_idx, elem_per_pg, ofs;
	u8 shift;

	shift = qpages->elem_per_pg_shift;
	page_idx = (idx >> shift) & (qpages->num_pages - 1);
	elem_per_pg = 1 << shift;
	elem_idx = idx & (elem_per_pg - 1);
	if (remaining_in_page)
		*remaining_in_page = elem_per_pg - elem_idx;
	ofs = elem_idx << qpages->elem_size_shift;
	page = qpages->pages + page_idx;
	return (char *)page->align_vaddr + ofs;
}

#endif
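/*
 * Illustrative usage sketch (not part of the driver): a rough idea of how a
 * caller might set up a queue of 256 entries of 64 bytes on 4096-byte pages
 * and index it with get_q_element(). The hwdev pointer, the producer index
 * prod_idx and the alignment value passed to hinic3_queue_pages_alloc() are
 * assumptions made for this example, not values mandated by this header.
 *
 *	struct hinic3_queue_pages qpages;
 *	u32 remaining;
 *	void *entry;
 *	int err;
 *
 *	hinic3_queue_pages_init(&qpages, 256, 4096, 64);
 *	err = hinic3_queue_pages_alloc(hwdev, &qpages, 4096);
 *	if (err)
 *		return err;
 *
 *	entry = get_q_element(&qpages, prod_idx, &remaining);
 *	(* 'remaining' entries, starting at 'entry', are contiguous here *)
 *
 *	hinic3_queue_pages_free(hwdev, &qpages);
 */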