// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2022-2023 Fujitsu Ltd. All rights reserved.
 */

#include <linux/hmm.h>
#include <linux/libnvdimm.h>

#include <rdma/ib_umem_odp.h>

#include "rxe.h"

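/*
 * MMU interval notifier callback. The core MM invokes this when the CPU
 * page tables covering part of the umem change; unmap the overlapping
 * range so that stale pfns are not used and later accesses fault the
 * pages back in.
 */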
static bool rxe_ib_invalidate_range(struct mmu_interval_notifier *mni,
                                    const struct mmu_notifier_range *range,
                                    unsigned long cur_seq)
{
        struct ib_umem_odp *umem_odp =
                container_of(mni, struct ib_umem_odp, notifier);
        unsigned long start, end;

        if (!mmu_notifier_range_blockable(range))
                return false;

        mutex_lock(&umem_odp->umem_mutex);
        mmu_interval_set_seq(mni, cur_seq);

        start = max_t(u64, ib_umem_start(umem_odp), range->start);
        end = min_t(u64, ib_umem_end(umem_odp), range->end);

        /* update umem_odp->map.pfn_list */
        ib_umem_odp_unmap_dma_pages(umem_odp, start, end);

        mutex_unlock(&umem_odp->umem_mutex);
        return true;
}

const struct mmu_interval_notifier_ops rxe_mn_ops = {
        .invalidate = rxe_ib_invalidate_range,
};

#define RXE_PAGEFAULT_DEFAULT 0
#define RXE_PAGEFAULT_RDONLY BIT(0)
#define RXE_PAGEFAULT_SNAPSHOT BIT(1)
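
/*
 * Fault in and map the pages backing [user_va, user_va + bcnt).
 * RXE_PAGEFAULT_SNAPSHOT only maps pages that are already present
 * instead of faulting them in; RXE_PAGEFAULT_RDONLY does not request
 * write access even if the MR is writable.
 * On success a non-negative value is returned with umem_mutex held;
 * on failure a negative errno is returned and the lock is not held.
 */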
static int rxe_odp_do_pagefault_and_lock(struct rxe_mr *mr, u64 user_va, int bcnt, u32 flags)
{
        struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
        bool fault = !(flags & RXE_PAGEFAULT_SNAPSHOT);
        u64 access_mask = 0;
        int np;

        if (umem_odp->umem.writable && !(flags & RXE_PAGEFAULT_RDONLY))
                access_mask |= HMM_PFN_WRITE;

        /*
         * ib_umem_odp_map_dma_and_lock() locks umem_mutex on success.
         * Callers must release the lock later so that the invalidation
         * handler can do its work again.
         */
        np = ib_umem_odp_map_dma_and_lock(umem_odp, user_va, bcnt,
                                          access_mask, fault);
        return np;
}

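/*
 * Take an initial snapshot of the pages backing the MR at registration
 * time. Pages that are not yet present are left invalid in the pfn list
 * and are faulted in on first access.
 */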
static int rxe_odp_init_pages(struct rxe_mr *mr)
{
        struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
        int ret;

        ret = rxe_odp_do_pagefault_and_lock(mr, mr->umem->address,
                                            mr->umem->length,
                                            RXE_PAGEFAULT_SNAPSHOT);

        if (ret >= 0)
                mutex_unlock(&umem_odp->umem_mutex);

        return ret >= 0 ? 0 : ret;
}

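/*
 * Register an ODP-enabled user MR: create the ODP umem with the
 * invalidation notifier attached and populate the initial page mapping.
 */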
int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
                         u64 iova, int access_flags, struct rxe_mr *mr)
{
        struct ib_umem_odp *umem_odp;
        int err;

        if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
                return -EOPNOTSUPP;

        rxe_mr_init(access_flags, mr);

        if (!start && length == U64_MAX) {
                if (iova != 0)
                        return -EINVAL;
                if (!(rxe->attr.odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
                        return -EINVAL;

                /* Never reached: implicit ODP is not implemented. */
        }

        umem_odp = ib_umem_odp_get(&rxe->ib_dev, start, length, access_flags,
                                   &rxe_mn_ops);
        if (IS_ERR(umem_odp)) {
                rxe_dbg_mr(mr, "Unable to create umem_odp err = %d\n",
                           (int)PTR_ERR(umem_odp));
                return PTR_ERR(umem_odp);
        }

        umem_odp->private = mr;

        mr->umem = &umem_odp->umem;
        mr->access = access_flags;
        mr->ibmr.length = length;
        mr->ibmr.iova = iova;
        mr->page_offset = ib_umem_offset(&umem_odp->umem);

        err = rxe_odp_init_pages(mr);
        if (err) {
                ib_umem_odp_release(umem_odp);
                return err;
        }

        mr->state = RXE_MR_STATE_VALID;
        mr->ibmr.type = IB_MR_TYPE_USER;

        return err;
}

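/*
 * Return true if any page covering [iova, iova + length) is not yet
 * present in the pfn list and must be faulted in. Called with
 * umem_mutex held.
 */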
static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, u64 iova,
                                       int length)
{
        bool need_fault = false;
        u64 addr;
        int idx;

        addr = iova & (~(BIT(umem_odp->page_shift) - 1));

        /* Skim through all pages that are to be accessed. */
        while (addr < iova + length) {
                idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;

                if (!(umem_odp->map.pfn_list[idx] & HMM_PFN_VALID)) {
                        need_fault = true;
                        break;
                }

                addr += BIT(umem_odp->page_shift);
        }
        return need_fault;
}

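/*
 * Helpers to translate an iova within the umem into an index into the
 * pfn list and into a byte offset within the corresponding page.
 */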
static unsigned long rxe_odp_iova_to_index(struct ib_umem_odp *umem_odp, u64 iova)
{
        return (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
}

static unsigned long rxe_odp_iova_to_page_offset(struct ib_umem_odp *umem_odp, u64 iova)
{
        return iova & (BIT(umem_odp->page_shift) - 1);
}

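/*
 * Make sure the pages covering [iova, iova + length) are present,
 * faulting them in if necessary. Returns 0 on success with umem_mutex
 * held; the caller must unlock it when done with the pages.
 */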
static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u32 flags)
{
        struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
        bool need_fault;
        int err;

        if (unlikely(length < 1))
                return -EINVAL;

        mutex_lock(&umem_odp->umem_mutex);

        need_fault = rxe_check_pagefault(umem_odp, iova, length);
        if (need_fault) {
                mutex_unlock(&umem_odp->umem_mutex);

                /* umem_mutex is locked on success. */
                err = rxe_odp_do_pagefault_and_lock(mr, iova, length, flags);
                if (err < 0)
                        return err;

                need_fault = rxe_check_pagefault(umem_odp, iova, length);
                if (need_fault)
                        return -EFAULT;
        }

        return 0;
}

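/*
 * Copy data between 'addr' and the MR, one page at a time, using the
 * pfn list to find the backing pages. The caller holds umem_mutex, so
 * the mapping cannot be invalidated while the copy is in progress.
 */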
static int __rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
                             int length, enum rxe_mr_copy_dir dir)
{
        struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
        struct page *page;
        int idx, bytes;
        size_t offset;
        u8 *user_va;

        idx = rxe_odp_iova_to_index(umem_odp, iova);
        offset = rxe_odp_iova_to_page_offset(umem_odp, iova);

        while (length > 0) {
                u8 *src, *dest;

                page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
                user_va = kmap_local_page(page);
                if (!user_va)
                        return -EFAULT;

                src = (dir == RXE_TO_MR_OBJ) ? addr : user_va + offset;
                dest = (dir == RXE_TO_MR_OBJ) ? user_va + offset : addr;

                bytes = BIT(umem_odp->page_shift) - offset;
                if (bytes > length)
                        bytes = length;

                memcpy(dest, src, bytes);
                kunmap_local(user_va);

                addr += bytes;
                length -= bytes;
                idx++;
                offset = 0;
        }

        return 0;
}

int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
                    enum rxe_mr_copy_dir dir)
{
        struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
        u32 flags = RXE_PAGEFAULT_DEFAULT;
        int err;

        if (length == 0)
                return 0;

        if (unlikely(!mr->umem->is_odp))
                return -EOPNOTSUPP;

        switch (dir) {
        case RXE_TO_MR_OBJ:
                break;

        case RXE_FROM_MR_OBJ:
                flags |= RXE_PAGEFAULT_RDONLY;
                break;

        default:
                return -EINVAL;
        }

        err = rxe_odp_map_range_and_lock(mr, iova, length, flags);
        if (err)
                return err;

        err = __rxe_odp_mr_copy(mr, iova, addr, length, dir);

        mutex_unlock(&umem_odp->umem_mutex);

        return err;
}

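/*
 * Perform an 8-byte compare & swap or fetch & add on the MR. The target
 * must lie within the MR and be 8-byte aligned. Called with umem_mutex
 * held; serialized against other atomic operations by atomic_ops_lock.
 */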
static enum resp_states rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova,
                                             int opcode, u64 compare,
                                             u64 swap_add, u64 *orig_val)
{
        struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
        unsigned int page_offset;
        struct page *page;
        unsigned int idx;
        u64 value;
        u64 *va;
        int err;

        if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
                rxe_dbg_mr(mr, "mr not in valid state\n");
                return RESPST_ERR_RKEY_VIOLATION;
        }

        err = mr_check_range(mr, iova, sizeof(value));
        if (err) {
                rxe_dbg_mr(mr, "iova out of range\n");
                return RESPST_ERR_RKEY_VIOLATION;
        }

        idx = rxe_odp_iova_to_index(umem_odp, iova);
        page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
        page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
        if (!page)
                return RESPST_ERR_RKEY_VIOLATION;

        if (unlikely(page_offset & 0x7)) {
                rxe_dbg_mr(mr, "iova not aligned\n");
                return RESPST_ERR_MISALIGNED_ATOMIC;
        }

        va = kmap_local_page(page);

        spin_lock_bh(&atomic_ops_lock);
        value = *orig_val = va[page_offset >> 3];

        if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
                if (value == compare)
                        va[page_offset >> 3] = swap_add;
        } else {
                value += swap_add;
                va[page_offset >> 3] = value;
        }
        spin_unlock_bh(&atomic_ops_lock);

        kunmap_local(va);

        return RESPST_NONE;
}

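/*
 * Entry point for atomic operations on ODP MRs: fault in the target
 * page, perform the operation, then release umem_mutex.
 */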
enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
                                   u64 compare, u64 swap_add, u64 *orig_val)
{
        struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
        int err;

        err = rxe_odp_map_range_and_lock(mr, iova, sizeof(char),
                                         RXE_PAGEFAULT_DEFAULT);
        if (err < 0)
                return RESPST_ERR_RKEY_VIOLATION;

        err = rxe_odp_do_atomic_op(mr, iova, opcode, compare, swap_add,
                                   orig_val);
        mutex_unlock(&umem_odp->umem_mutex);

        return err;
}

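/*
 * Write CPU caches back for [iova, iova + length), page by page, so
 * that data placed in a persistent-memory MR becomes durable.
 */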
int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
                            unsigned int length)
{
        struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
        unsigned int page_offset;
        unsigned long index;
        struct page *page;
        unsigned int bytes;
        int err;
        u8 *va;

        err = rxe_odp_map_range_and_lock(mr, iova, length,
                                         RXE_PAGEFAULT_DEFAULT);
        if (err)
                return err;

        while (length > 0) {
                index = rxe_odp_iova_to_index(umem_odp, iova);
                page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);

                page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
                if (!page) {
                        mutex_unlock(&umem_odp->umem_mutex);
                        return -EFAULT;
                }

                bytes = min_t(unsigned int, length,
                              mr_page_size(mr) - page_offset);

                va = kmap_local_page(page);
                arch_wb_cache_pmem(va + page_offset, bytes);
                kunmap_local(va);

                length -= bytes;
                iova += bytes;
                page_offset = 0;
        }

        mutex_unlock(&umem_odp->umem_mutex);

        return 0;
}

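/*
 * 8-byte atomic write to an ODP MR. The target must lie within the MR
 * and be 8-byte aligned (IBA A19.4.2); smp_store_release() orders the
 * store after all prior operations.
 */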
enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
{
        struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
        unsigned int page_offset;
        unsigned long index;
        struct page *page;
        int err;
        u64 *va;

        /* See IBA oA19-28 */
        err = mr_check_range(mr, iova, sizeof(value));
        if (unlikely(err)) {
                rxe_dbg_mr(mr, "iova out of range\n");
                return RESPST_ERR_RKEY_VIOLATION;
        }

        err = rxe_odp_map_range_and_lock(mr, iova, sizeof(value),
                                         RXE_PAGEFAULT_DEFAULT);
        if (err)
                return RESPST_ERR_RKEY_VIOLATION;

        page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
        index = rxe_odp_iova_to_index(umem_odp, iova);
        page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
        if (!page) {
                mutex_unlock(&umem_odp->umem_mutex);
                return RESPST_ERR_RKEY_VIOLATION;
        }
        /* See IBA A19.4.2 */
        if (unlikely(page_offset & 0x7)) {
                mutex_unlock(&umem_odp->umem_mutex);
                rxe_dbg_mr(mr, "misaligned address\n");
                return RESPST_ERR_MISALIGNED_ATOMIC;
        }

        va = kmap_local_page(page);
        /* Do atomic write after all prior operations have completed */
        smp_store_release(&va[page_offset >> 3], value);
        kunmap_local(va);

        mutex_unlock(&umem_odp->umem_mutex);

        return RESPST_NONE;
}