// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.

#include <linux/device.h>

#include "hinic3_hwdev.h"
#include "hinic3_queue_common.h"

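/**
 * hinic3_queue_pages_init() - Compute the page layout for a HW queue.
 * @qpages:    Queue pages descriptor to initialize.
 * @q_depth:   Number of elements in the queue.
 * @page_size: Size in bytes of one backing page.
 * @elem_size: Size in bytes of one queue element; expected to be a power of
 *             two, since only its log2 is stored.
 */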
void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
			     u32 page_size, u32 elem_size)
{
	u32 elem_per_page;

	elem_per_page = min(page_size / elem_size, q_depth);

	qpages->pages = NULL;
	qpages->page_size = page_size;
	qpages->num_pages = max(q_depth / elem_per_page, 1);
	qpages->elem_size_shift = ilog2(elem_size);
	qpages->elem_per_pg_shift = ilog2(elem_per_page);
}

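/* Free the first @pg_cnt DMA pages of @qpages along with the page pointer
 * array itself. Used both for full teardown and for unwinding a partially
 * completed allocation.
 */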
static void __queue_pages_free(struct hinic3_hwdev *hwdev,
			       struct hinic3_queue_pages *qpages, u32 pg_cnt)
{
	while (pg_cnt > 0) {
		pg_cnt--;
		hinic3_dma_free_coherent_align(hwdev->dev,
					       qpages->pages + pg_cnt);
	}
	kfree(qpages->pages);
	qpages->pages = NULL;
}

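/**
 * hinic3_queue_pages_free() - Free all pages backing a HW queue.
 * @hwdev:  HW device the pages were allocated for.
 * @qpages: Queue pages descriptor previously populated by
 *          hinic3_queue_pages_alloc().
 */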
void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev,
			     struct hinic3_queue_pages *qpages)
{
	__queue_pages_free(hwdev, qpages, qpages->num_pages);
}

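/**
 * hinic3_queue_pages_alloc() - Allocate DMA-coherent pages for a HW queue.
 * @hwdev:  HW device to allocate the pages for.
 * @qpages: Queue pages descriptor initialized by hinic3_queue_pages_init().
 * @align:  Required alignment of each page; 0 means align to the page size.
 *
 * On failure, any pages allocated so far are freed before returning.
 *
 * Return: 0 on success, negative error code otherwise.
 */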
int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev,
			     struct hinic3_queue_pages *qpages, u32 align)
{
	u32 pg_idx;
	int err;

	qpages->pages = kcalloc(qpages->num_pages, sizeof(qpages->pages[0]),
				GFP_KERNEL);
	if (!qpages->pages)
		return -ENOMEM;

	if (align == 0)
		align = qpages->page_size;

	for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) {
		err = hinic3_dma_zalloc_coherent_align(hwdev->dev,
						       qpages->page_size,
						       align,
						       GFP_KERNEL,
						       qpages->pages + pg_idx);
		if (err) {
			__queue_pages_free(hwdev, qpages, pg_idx);
			return err;
		}
	}

	return 0;
}
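
/* Illustrative call sequence (a sketch only; "qp", "q_depth", "page_size" and
 * "wqe_size" below are placeholder names, not identifiers taken from the
 * driver):
 *
 *	struct hinic3_queue_pages qp;
 *	int err;
 *
 *	hinic3_queue_pages_init(&qp, q_depth, page_size, wqe_size);
 *	err = hinic3_queue_pages_alloc(hwdev, &qp, 0);
 *	if (err)
 *		return err;
 *
 *	... post queue elements, run the queue ...
 *
 *	hinic3_queue_pages_free(hwdev, &qp);
 */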