/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <linux/rbtree.h>

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

struct umem_odp_node {
	u64 __subtree_last;
	struct rb_node rb;
};

struct ib_umem_odp {
	/*
	 * An array of the pages included in the on-demand paging umem.
	 * Indices of pages that are currently not mapped into the device will
	 * contain NULL.
	 */
	struct page		**page_list;
	/*
	 * An array of the same size as page_list, with DMA addresses mapped
	 * for the pages in page_list. The lower two bits designate access
	 * permissions. See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t		*dma_list;
	/*
	 * The umem_mutex protects the page_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex		umem_mutex;
	void			*private; /* for the HW driver to use. */

	/* When false, use the notifier counter in the ucontext struct. */
	bool mn_counters_active;
	int notifiers_seq;
	int notifiers_count;

	/* A linked list of umems that don't have private mmu notifier
	 * counters yet. */
	struct list_head no_private_counters;
	struct ib_umem *umem;

	/* Tree tracking */
	struct umem_odp_node	interval_tree;

	struct completion	notifier_completion;
	int			dying;
};

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

/* Initialize ODP tracking (odp_data) for a newly created umem. */
int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);

/* Release the ODP tracking and any pages that are still mapped. */
void ib_umem_odp_release(struct ib_umem *umem);

/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to the map_dma_pages function.
 *
 * Be aware that upgrading a mapped address might result in a change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1<<0ULL)
#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))

/*
 * Map the pages covering [start_offset, start_offset + bcnt) for DMA and
 * record them in dma_list, tagged with the permission bits from access_mask.
 * current_seq is the notifier sequence count sampled before the mapping; it
 * is used to detect racing invalidations. Returns the number of pages mapped
 * on success or a negative error code.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
			      u64 access_mask, unsigned long current_seq);

/* Unmap and unpin the previously mapped pages in [start_offset, bound). */
void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
				 u64 bound);
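/*
 * Illustrative sketch (not part of the original API): one way a driver could
 * translate a dma_list entry filled in by ib_umem_odp_map_dma_pages() into a
 * hardware page-table entry, keeping the bus address and converting the two
 * permission bits into device-specific flags. The EXAMPLE_* names are
 * hypothetical, and the block is kept under "#if 0" so the compiled header
 * is unchanged.
 */
#if 0
#define EXAMPLE_HW_PTE_READ	(1ULL << 61)	/* hypothetical HW flag */
#define EXAMPLE_HW_PTE_WRITE	(1ULL << 62)	/* hypothetical HW flag */

static u64 example_odp_entry_to_hw_pte(dma_addr_t umem_dma)
{
	/* Strip the permission bits to recover the DMA (bus) address. */
	u64 pte = umem_dma & ODP_DMA_ADDR_MASK;

	/* An entry with neither bit set is not currently mapped. */
	if (umem_dma & ODP_READ_ALLOWED_BIT)
		pte |= EXAMPLE_HW_PTE_READ;
	if (umem_dma & ODP_WRITE_ALLOWED_BIT)
		pte |= EXAMPLE_HW_PTE_WRITE;

	return pte;
}
#endif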
/* Interval-tree bookkeeping of the ODP umems, keyed by their address range. */
void rbt_ib_umem_insert(struct umem_odp_node *node, struct rb_root *root);
void rbt_ib_umem_remove(struct umem_odp_node *node, struct rb_root *root);
typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
			      void *cookie);
/*
 * Call the callback on each ib_umem in the range. Returns the logical OR of
 * the return values of the functions called.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root *root, u64 start, u64 end,
				  umem_call_back cb, void *cookie);

struct umem_odp_node *rbt_ib_umem_iter_first(struct rb_root *root,
					     u64 start, u64 last);
struct umem_odp_node *rbt_ib_umem_iter_next(struct umem_odp_node *node,
					    u64 start, u64 last);

static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item,
					     unsigned long mmu_seq)
{
	/*
	 * This code is strongly based on the KVM code from
	 * mmu_notifier_retry. Should be called with
	 * the relevant locks taken (item->odp_data->umem_mutex
	 * and the ucontext umem_rwsem semaphore locked for read).
	 * An illustrative calling pattern is sketched at the end of
	 * this file.
	 */

	/* Do not allow page faults while the new ib_umem hasn't seen a state
	 * with zero notifiers yet, and doesn't have its own valid set of
	 * private counters. */
	if (!item->odp_data->mn_counters_active)
		return 1;

	if (unlikely(item->odp_data->notifiers_count))
		return 1;
	if (item->odp_data->notifiers_seq != mmu_seq)
		return 1;
	return 0;
}

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline int ib_umem_odp_get(struct ib_ucontext *context,
				  struct ib_umem *umem)
{
	return -EINVAL;
}

static inline void ib_umem_odp_release(struct ib_umem *umem) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */
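/*
 * Illustrative sketch (not part of the original header): the sample-and-recheck
 * pattern a driver page-fault handler is expected to follow around
 * ib_umem_mmu_notifier_retry(), mirroring the KVM mmu_notifier_retry scheme
 * referenced above. The function below and its error handling are
 * hypothetical; the block is kept under "#if 0" so nothing is added to the
 * compiled header.
 */
#if 0
static int example_odp_fault(struct ib_umem *umem, u64 io_virt, u64 bcnt)
{
	unsigned long current_seq;
	int npages;

	/* Sample the notifier sequence count before mapping the pages. */
	current_seq = umem->odp_data->notifiers_seq;

	npages = ib_umem_odp_map_dma_pages(umem, io_virt, bcnt,
					   ODP_READ_ALLOWED_BIT |
					   ODP_WRITE_ALLOWED_BIT,
					   current_seq);
	if (npages < 0)
		return npages;

	mutex_lock(&umem->odp_data->umem_mutex);
	if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
		/* An invalidation ran concurrently; let the caller retry. */
		mutex_unlock(&umem->odp_data->umem_mutex);
		return -EAGAIN;
	}
	/*
	 * No invalidation raced with the mapping: the entries in dma_list can
	 * be programmed into the device translation tables while umem_mutex
	 * is held.
	 */
	mutex_unlock(&umem->odp_data->umem_mutex);
	return npages;
}
#endif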