/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 *
 * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/* Internal definitions for the RDMA core FRMR (Fast Registration MR)
 * handle pools. The driver-facing API lives in <rdma/frmr_pools.h>;
 * this header is private to the core implementation.
 */

#ifndef RDMA_CORE_FRMR_POOLS_H
#define RDMA_CORE_FRMR_POOLS_H

#include <rdma/frmr_pools.h>
#include <linux/rbtree_types.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <asm/page.h>
#include <linux/workqueue.h>

/* Number of u32 handles that fit in one page after reserving room for the
 * list_head linkage of struct frmr_handles_page (see below).
 */
#define NUM_HANDLES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct list_head)) / sizeof(u32))

/* One page-sized batch of FRMR handles.
 * list: entry in a frmr_queue's pages_list.
 * handles: the stored handle values; sized so the whole struct fills
 *          exactly one page.
 */
struct frmr_handles_page {
	struct list_head list;
	u32 handles[NUM_HANDLES_PER_PAGE];
};

/* FRMR queue holds a list of frmr_handles_page.
 * num_pages: number of pages in the queue.
 * ci: current index in the handles array across all pages.
 */
struct frmr_queue {
	struct list_head pages_list;
	u32 num_pages;
	unsigned long ci;
};

/* A single pool of FRMR handles, looked up by its key in the per-device
 * rb-tree of struct ib_frmr_pools.
 */
struct ib_frmr_pool {
	struct rb_node node;	/* Linkage in ib_frmr_pools.rb_root */
	struct ib_frmr_key key; /* Pool key */

	/* Protect access to the queue */
	spinlock_t lock;
	struct frmr_queue queue;
	/* NOTE(review): inactive_queue presumably holds handles staged by
	 * aging_work before release — confirm against the .c implementation.
	 */
	struct frmr_queue inactive_queue;

	struct delayed_work aging_work;	/* Periodic aging, run on aging_wq */
	struct ib_device *device;	/* Owning device */

	u32 max_in_use;		/* presumably high-water mark of in_use — verify */
	u32 in_use;		/* Handles currently handed out */
	u32 pinned_handles;	/* Set via ib_frmr_pools_set_pinned() */
};

/* Per-device collection of FRMR pools. */
struct ib_frmr_pools {
	struct rb_root rb_root;	/* Tree of ib_frmr_pool nodes */
	rwlock_t rb_lock;	/* Protects rb_root */
	const struct ib_frmr_pool_ops *pool_ops;

	struct workqueue_struct *aging_wq;	/* Queue for pools' aging_work */
	u32 aging_period_sec;			/* Aging interval in seconds */
};

/* Configure the number of pinned handles for the pool matching @key on
 * @device. Presumably returns 0 on success or a negative errno (kernel
 * convention) — confirm against the implementation.
 */
int ib_frmr_pools_set_pinned(struct ib_device *device, struct ib_frmr_key *key,
			     u32 pinned_handles);

/* Set the pool-aging period, in seconds, for @device.
 * Presumably returns 0 on success or a negative errno — confirm.
 */
int ib_frmr_pools_set_aging_period(struct ib_device *device, u32 period_sec);
#endif /* RDMA_CORE_FRMR_POOLS_H */