1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright(c) 2016 - 2020 Intel Corporation.
4 */
5
6 #include <linux/hash.h>
7 #include <linux/bitops.h>
8 #include <linux/lockdep.h>
9 #include <linux/vmalloc.h>
10 #include <linux/slab.h>
11 #include <rdma/ib_verbs.h>
12 #include <rdma/ib_hdrs.h>
13 #include <rdma/opa_addr.h>
14 #include <rdma/uverbs_ioctl.h>
15 #include "qp.h"
16 #include "vt.h"
17 #include "trace.h"
18
19 #define RVT_RWQ_COUNT_THRESHOLD 16
20
21 static void rvt_rc_timeout(struct timer_list *t);
22 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
23 enum ib_qp_type type);
24
25 /*
26 * Convert the AETH RNR timeout code into the number of microseconds.
27 */
28 static const u32 ib_rvt_rnr_table[32] = {
29 655360, /* 00: 655.36 */
30 10, /* 01: .01 */
31 20, /* 02: .02 */
32 30, /* 03: .03 */
33 40, /* 04: .04 */
34 60, /* 05: .06 */
35 80, /* 06: .08 */
36 120, /* 07: .12 */
37 160, /* 08: .16 */
38 240, /* 09: .24 */
39 320, /* 0A: .32 */
40 480, /* 0B: .48 */
41 640, /* 0C: .64 */
42 960, /* 0D: .96 */
43 1280, /* 0E: 1.28 */
44 1920, /* 0F: 1.92 */
45 2560, /* 10: 2.56 */
46 3840, /* 11: 3.84 */
47 5120, /* 12: 5.12 */
48 7680, /* 13: 7.68 */
49 10240, /* 14: 10.24 */
50 15360, /* 15: 15.36 */
51 20480, /* 16: 20.48 */
52 30720, /* 17: 30.72 */
53 40960, /* 18: 40.96 */
54 61440, /* 19: 61.44 */
55 81920, /* 1A: 81.92 */
56 122880, /* 1B: 122.88 */
57 163840, /* 1C: 163.84 */
58 245760, /* 1D: 245.76 */
59 327680, /* 1E: 327.68 */
60 491520 /* 1F: 491.52 */
61 };
62
63 /*
64 * Note that it is OK to post send work requests in the SQE and ERR
65 * states; rvt_do_send() will process them and generate error
66 * completions as per IB 1.2 C10-96.
67 */
68 const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
69 [IB_QPS_RESET] = 0,
70 [IB_QPS_INIT] = RVT_POST_RECV_OK,
71 [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
72 [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
73 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
74 RVT_PROCESS_NEXT_SEND_OK,
75 [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
76 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
77 [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
78 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
79 [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
80 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
81 };
82 EXPORT_SYMBOL(ib_rvt_state_ops);
83
84 /* platform specific: return the last level cache (llc) size, in KiB */
85 static int rvt_wss_llc_size(void)
86 {
87 /* assume that the boot CPU value is universal for all CPUs */
88 return boot_cpu_data.x86_cache_size;
89 }
90
91 /* platform specific: cacheless copy */
92 static void cacheless_memcpy(void *dst, void *src, size_t n)
93 {
94 /*
95 * Use the only available X64 cacheless copy. Add a __user cast
96 * to quiet sparse. The src argument is already in the kernel so
97 * there are no security issues. The extra fault recovery machinery
98 * is not invoked.
99 */
100 __copy_user_nocache(dst, (void __user *)src, n);
101 }
102
103 void rvt_wss_exit(struct rvt_dev_info *rdi)
104 {
105 struct rvt_wss *wss = rdi->wss;
106
107 if (!wss)
108 return;
109
110 /* coded to handle partially initialized and repeat callers */
111 kfree(wss->entries);
112 wss->entries = NULL;
113 kfree(rdi->wss);
114 rdi->wss = NULL;
115 }
116
117 /*
118 * rvt_wss_init - Init wss data structures
119 *
120 * Return: 0 on success
121 */
122 int rvt_wss_init(struct rvt_dev_info *rdi)
123 {
124 unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
125 unsigned int wss_threshold = rdi->dparms.wss_threshold;
126 unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
127 long llc_size;
128 long llc_bits;
129 long table_size;
130 long table_bits;
131 struct rvt_wss *wss;
132 int node = rdi->dparms.node;
133
134 if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
135 rdi->wss = NULL;
136 return 0;
137 }
138
139 rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
140 if (!rdi->wss)
141 return -ENOMEM;
142 wss = rdi->wss;
143
144 /* check for a valid percent range - default to 80 if none or invalid */
145 if (wss_threshold < 1 || wss_threshold > 100)
146 wss_threshold = 80;
147
148 /* reject a wildly large period */
149 if (wss_clean_period > 1000000)
150 wss_clean_period = 256;
151
152 /* reject a zero period */
153 if (wss_clean_period == 0)
154 wss_clean_period = 1;
155
156 /*
157 * Calculate the table size - the next power of 2 larger than the
158 * LLC size. LLC size is in KiB.
159 */
160 llc_size = rvt_wss_llc_size() * 1024;
161 table_size = roundup_pow_of_two(llc_size);
162
163 /* one bit per page in rounded up table */
164 llc_bits = llc_size / PAGE_SIZE;
165 table_bits = table_size / PAGE_SIZE;
166 wss->pages_mask = table_bits - 1;
167 wss->num_entries = table_bits / BITS_PER_LONG;
168
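	/*
	 * The threshold is a page count: the configured percentage of the
	 * LLC's pages.  Once the recently-touched working set exceeds it,
	 * the adaptive SGE copy path falls back to cacheless copies.
	 */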
169 wss->threshold = (llc_bits * wss_threshold) / 100;
170 if (wss->threshold == 0)
171 wss->threshold = 1;
172
173 wss->clean_period = wss_clean_period;
174 atomic_set(&wss->clean_counter, wss_clean_period);
175
176 wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
177 GFP_KERNEL, node);
178 if (!wss->entries) {
179 rvt_wss_exit(rdi);
180 return -ENOMEM;
181 }
182
183 return 0;
184 }
185
186 /*
187 * Advance the clean counter. When the clean period has expired,
188 * clean an entry.
189 *
190 * This is implemented in atomics to avoid locking. Because multiple
191 * variables are involved, it can be racy which can lead to slightly
192 * inaccurate information. Since this is only a heuristic, this is
193 * OK. Any inaccuracies will clean themselves out as the counter
194 * advances. That said, it is unlikely the entry clean operation will
195 * race - the next possible racer will not start until the next clean
196 * period.
197 *
198 * The clean counter is implemented as a decrement to zero. When zero
199 * is reached an entry is cleaned.
200 */
201 static void wss_advance_clean_counter(struct rvt_wss *wss)
202 {
203 int entry;
204 int weight;
205 unsigned long bits;
206
207 /* become the cleaner if we decrement the counter to zero */
208 if (atomic_dec_and_test(&wss->clean_counter)) {
209 /*
210 * Set, not add, the clean period. This avoids an issue
211 * where the counter could decrement below the clean period.
212 * Doing a set can result in lost decrements, slowing the
213 * clean advance. Since this is a heuristic, this possible
214 * slowdown is OK.
215 *
216 * An alternative is to loop, advancing the counter by a
217 * clean period until the result is > 0. However, this could
218 * lead to several threads keeping another in the clean loop.
219 * This could be mitigated by limiting the number of times
220 * we stay in the loop.
221 */
222 atomic_set(&wss->clean_counter, wss->clean_period);
223
224 /*
225 * Uniquely grab the entry to clean and move to next.
226 * The current entry is always the lower bits of
227 * wss.clean_entry. The table size, wss.num_entries,
228 * is always a power-of-2.
229 */
230 entry = (atomic_inc_return(&wss->clean_entry) - 1)
231 & (wss->num_entries - 1);
232
233 /* clear the entry and count the bits */
234 bits = xchg(&wss->entries[entry], 0);
235 weight = hweight64((u64)bits);
236 /* only adjust the contended total count if needed */
237 if (weight)
238 atomic_sub(weight, &wss->total_count);
239 }
240 }
241
242 /*
243 * Insert the given address into the working set array.
244 */
245 static void wss_insert(struct rvt_wss *wss, void *address)
246 {
247 u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
248 u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
249 u32 nr = page & (BITS_PER_LONG - 1);
250
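	/*
	 * One bit per page: 'entry' picks the unsigned long in
	 * wss->entries and 'nr' the bit within it.  A 0->1 transition
	 * means a page newly joined the working set, so count it.
	 */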
251 if (!test_and_set_bit(nr, &wss->entries[entry]))
252 atomic_inc(&wss->total_count);
253
254 wss_advance_clean_counter(wss);
255 }
256
257 /*
258 * Is the working set larger than the threshold?
259 */
260 static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
261 {
262 return atomic_read(&wss->total_count) >= wss->threshold;
263 }
264
265 static void get_map_page(struct rvt_qpn_table *qpt,
266 struct rvt_qpn_map *map)
267 {
268 unsigned long page = get_zeroed_page(GFP_KERNEL);
269
270 /*
271 * Free the page if someone raced with us installing it.
272 */
273
274 spin_lock(&qpt->lock);
275 if (map->page)
276 free_page(page);
277 else
278 map->page = (void *)page;
279 spin_unlock(&qpt->lock);
280 }
281
282 /**
283 * init_qpn_table - initialize the QP number table for a device
284 * @rdi: rvt dev struct
285 * @qpt: the QPN table
286 */
287 static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
288 {
289 u32 offset, i;
290 struct rvt_qpn_map *map;
291 int ret = 0;
292
293 if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
294 return -EINVAL;
295
296 spin_lock_init(&qpt->lock);
297
298 qpt->last = rdi->dparms.qpn_start;
299 qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
300
301 /*
302 * Drivers may want some QPs beyond what we need for verbs; let them use
303 * our qpn table. No need for two. Let's go ahead and mark the bitmaps
304 * for those. The reserved range must be *after* the range which verbs
305 * will pick from.
306 */
307
308 /* Figure out number of bit maps needed before reserved range */
309 qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
310
311 /* This should always be zero */
312 offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
313
314 /* Starting with the first reserved bit map */
315 map = &qpt->map[qpt->nmaps];
316
317 rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
318 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
319 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
320 if (!map->page) {
321 get_map_page(qpt, map);
322 if (!map->page) {
323 ret = -ENOMEM;
324 break;
325 }
326 }
327 set_bit(offset, map->page);
328 offset++;
329 if (offset == RVT_BITS_PER_PAGE) {
330 /* next page */
331 qpt->nmaps++;
332 map++;
333 offset = 0;
334 }
335 }
336 return ret;
337 }
338
339 /**
340 * free_qpn_table - free the QP number table for a device
341 * @qpt: the QPN table
342 */
343 static void free_qpn_table(struct rvt_qpn_table *qpt)
344 {
345 int i;
346
347 for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
348 free_page((unsigned long)qpt->map[i].page);
349 }
350
351 /**
352 * rvt_driver_qp_init - Init driver qp resources
353 * @rdi: rvt dev structure
354 *
355 * Return: 0 on success
356 */
357 int rvt_driver_qp_init(struct rvt_dev_info *rdi)
358 {
359 int i;
360 int ret = -ENOMEM;
361
362 if (!rdi->dparms.qp_table_size)
363 return -EINVAL;
364
365 /*
366 * If driver is not doing any QP allocation then make sure it is
367 * providing the necessary QP functions.
368 */
369 if (!rdi->driver_f.free_all_qps ||
370 !rdi->driver_f.qp_priv_alloc ||
371 !rdi->driver_f.qp_priv_free ||
372 !rdi->driver_f.notify_qp_reset ||
373 !rdi->driver_f.notify_restart_rc)
374 return -EINVAL;
375
376 /* allocate parent object */
377 rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
378 rdi->dparms.node);
379 if (!rdi->qp_dev)
380 return -ENOMEM;
381
382 /* allocate hash table */
383 rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
384 rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
385 rdi->qp_dev->qp_table =
386 kmalloc_array_node(rdi->qp_dev->qp_table_size,
387 sizeof(*rdi->qp_dev->qp_table),
388 GFP_KERNEL, rdi->dparms.node);
389 if (!rdi->qp_dev->qp_table)
390 goto no_qp_table;
391
392 for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
393 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
394
395 spin_lock_init(&rdi->qp_dev->qpt_lock);
396
397 /* initialize qpn map */
398 if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
399 goto fail_table;
400
401 spin_lock_init(&rdi->n_qps_lock);
402
403 return 0;
404
405 fail_table:
406 kfree(rdi->qp_dev->qp_table);
407 free_qpn_table(&rdi->qp_dev->qpn_table);
408
409 no_qp_table:
410 kfree(rdi->qp_dev);
411
412 return ret;
413 }
414
415 /**
416 * rvt_free_qp_cb - callback function to reset a qp
417 * @qp: the qp to reset
418 * @v: a 64-bit value
419 *
420 * This function resets the qp and removes it from the
421 * qp hash table.
422 */
423 static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
424 {
425 unsigned int *qp_inuse = (unsigned int *)v;
426 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
427
428 /* Reset the qp and remove it from the qp hash list */
429 rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
430
431 /* Increment the qp_inuse count */
432 (*qp_inuse)++;
433 }
434
435 /**
436 * rvt_free_all_qps - check for QPs still in use
437 * @rdi: rvt device info structure
438 *
439 * There should not be any QPs still in use.
440 * Free memory for table.
441 * Return the number of QPs still in use.
442 */
443 static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
444 {
445 unsigned int qp_inuse = 0;
446
447 qp_inuse += rvt_mcast_tree_empty(rdi);
448
449 rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
450
451 return qp_inuse;
452 }
453
454 /**
455 * rvt_qp_exit - clean up qps on device exit
456 * @rdi: rvt dev structure
457 *
458 * Check for qp leaks and free resources.
459 */
460 void rvt_qp_exit(struct rvt_dev_info *rdi)
461 {
462 u32 qps_inuse = rvt_free_all_qps(rdi);
463
464 if (qps_inuse)
465 rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
466 qps_inuse);
467
468 kfree(rdi->qp_dev->qp_table);
469 free_qpn_table(&rdi->qp_dev->qpn_table);
470 kfree(rdi->qp_dev);
471 }
472
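/* Compute the QPN represented by bit 'off' of bitmap page 'map'. */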
473 static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
474 struct rvt_qpn_map *map, unsigned off)
475 {
476 return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
477 }
478
479 /**
480 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
481 * IB_QPT_SMI/IB_QPT_GSI
482 * @rdi: rvt device info structure
483 * @qpt: queue pair number table pointer
484 * @type: the QP type
485 * @port_num: IB port number, 1 based, comes from core
486 * @exclude_prefix: prefix of special queue pair number being allocated
487 *
488 * Return: The queue pair number
489 */
490 static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
491 enum ib_qp_type type, u8 port_num, u8 exclude_prefix)
492 {
493 u32 i, offset, max_scan, qpn;
494 struct rvt_qpn_map *map;
495 u32 ret;
496 u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
497 RVT_AIP_QPN_MAX : RVT_QPN_MAX;
498
499 if (rdi->driver_f.alloc_qpn)
500 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
501
502 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
503 unsigned n;
504
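		/*
		 * QPN 0 (SMI) and 1 (GSI) are fixed.  Track which of the two
		 * is already taken per port via a bit in qpt->flags:
		 * bit 2 * (port_num - 1) for SMI, the next bit for GSI.
		 */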
505 ret = type == IB_QPT_GSI;
506 n = 1 << (ret + 2 * (port_num - 1));
507 spin_lock(&qpt->lock);
508 if (qpt->flags & n)
509 ret = -EINVAL;
510 else
511 qpt->flags |= n;
512 spin_unlock(&qpt->lock);
513 goto bail;
514 }
515
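	/*
	 * Start the search just past the last allocated QPN; if that runs
	 * beyond the largest allowed QPN, wrap back to the increment with
	 * bit 0 of the last QPN inverted.
	 */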
516 qpn = qpt->last + qpt->incr;
517 if (qpn >= max_qpn)
518 qpn = qpt->incr | ((qpt->last & 1) ^ 1);
519 /* offset carries bit 0 */
520 offset = qpn & RVT_BITS_PER_PAGE_MASK;
521 map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
522 max_scan = qpt->nmaps - !offset;
523 for (i = 0;;) {
524 if (unlikely(!map->page)) {
525 get_map_page(qpt, map);
526 if (unlikely(!map->page))
527 break;
528 }
529 do {
530 if (!test_and_set_bit(offset, map->page)) {
531 qpt->last = qpn;
532 ret = qpn;
533 goto bail;
534 }
535 offset += qpt->incr;
536 /*
537 * This qpn might be bogus if offset >= BITS_PER_PAGE.
538 * That is OK. It gets re-assigned below
539 */
540 qpn = mk_qpn(qpt, map, offset);
541 } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
542 /*
543 * In order to keep the number of pages allocated to a
544 * minimum, we scan all existing pages before increasing
545 * the size of the bitmap table.
546 */
547 if (++i > max_scan) {
548 if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
549 break;
550 map = &qpt->map[qpt->nmaps++];
551 /* start at incr with current bit 0 */
552 offset = qpt->incr | (offset & 1);
553 } else if (map < &qpt->map[qpt->nmaps]) {
554 ++map;
555 /* start at incr with current bit 0 */
556 offset = qpt->incr | (offset & 1);
557 } else {
558 map = &qpt->map[0];
559 /* wrap to first map page, invert bit 0 */
560 offset = qpt->incr | ((offset & 1) ^ 1);
561 }
562 /* there can be no set bits in low-order QoS bits */
563 WARN_ON(rdi->dparms.qos_shift > 1 &&
564 offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
565 qpn = mk_qpn(qpt, map, offset);
566 }
567
568 ret = -ENOMEM;
569
570 bail:
571 return ret;
572 }
573
574 /**
575 * rvt_clear_mr_refs - Drop held mr refs
576 * @qp: rvt qp data structure
577 * @clr_sends: Whether to clear the send side or not
578 */
579 static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
580 {
581 unsigned n;
582 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
583
584 if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
585 rvt_put_ss(&qp->s_rdma_read_sge);
586
587 rvt_put_ss(&qp->r_sge);
588
589 if (clr_sends) {
590 while (qp->s_last != qp->s_head) {
591 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
592
593 rvt_put_qp_swqe(qp, wqe);
594 if (++qp->s_last >= qp->s_size)
595 qp->s_last = 0;
596 smp_wmb(); /* see qp_set_savail */
597 }
598 if (qp->s_rdma_mr) {
599 rvt_put_mr(qp->s_rdma_mr);
600 qp->s_rdma_mr = NULL;
601 }
602 }
603
604 for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
605 struct rvt_ack_entry *e = &qp->s_ack_queue[n];
606
607 if (e->rdma_sge.mr) {
608 rvt_put_mr(e->rdma_sge.mr);
609 e->rdma_sge.mr = NULL;
610 }
611 }
612 }
613
614 /**
615 * rvt_swqe_has_lkey - return true if lkey is used by swqe
616 * @wqe: the send wqe
617 * @lkey: the lkey
618 *
619 * Test the swqe for using lkey
620 */
621 static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
622 {
623 int i;
624
625 for (i = 0; i < wqe->wr.num_sge; i++) {
626 struct rvt_sge *sge = &wqe->sg_list[i];
627
628 if (rvt_mr_has_lkey(sge->mr, lkey))
629 return true;
630 }
631 return false;
632 }
633
634 /**
635 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
636 * @qp: the rvt_qp
637 * @lkey: the lkey
638 */
639 static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
640 {
641 u32 s_last = qp->s_last;
642
643 while (s_last != qp->s_head) {
644 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);
645
646 if (rvt_swqe_has_lkey(wqe, lkey))
647 return true;
648
649 if (++s_last >= qp->s_size)
650 s_last = 0;
651 }
652 if (qp->s_rdma_mr)
653 if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
654 return true;
655 return false;
656 }
657
658 /**
659 * rvt_qp_acks_has_lkey - return true if acks have lkey
660 * @qp: the qp
661 * @lkey: the lkey
662 */
663 static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
664 {
665 int i;
666 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
667
668 for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
669 struct rvt_ack_entry *e = &qp->s_ack_queue[i];
670
671 if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
672 return true;
673 }
674 return false;
675 }
676
677 /**
678 * rvt_qp_mr_clean - clean up remote ops for lkey
679 * @qp: the qp
680 * @lkey: the lkey that is being de-registered
681 *
682 * This routine checks if the lkey is being used by
683 * the qp.
684 *
685 * If so, the qp is put into an error state to eliminate
686 * any references from the qp.
687 */
688 void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
689 {
690 bool lastwqe = false;
691
692 if (qp->ibqp.qp_type == IB_QPT_SMI ||
693 qp->ibqp.qp_type == IB_QPT_GSI)
694 /* avoid special QPs */
695 return;
696 spin_lock_irq(&qp->r_lock);
697 spin_lock(&qp->s_hlock);
698 spin_lock(&qp->s_lock);
699
700 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
701 goto check_lwqe;
702
703 if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
704 rvt_qp_sends_has_lkey(qp, lkey) ||
705 rvt_qp_acks_has_lkey(qp, lkey))
706 lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
707 check_lwqe:
708 spin_unlock(&qp->s_lock);
709 spin_unlock(&qp->s_hlock);
710 spin_unlock_irq(&qp->r_lock);
711 if (lastwqe) {
712 struct ib_event ev;
713
714 ev.device = qp->ibqp.device;
715 ev.element.qp = &qp->ibqp;
716 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
717 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
718 }
719 }
720
721 /**
722 * rvt_remove_qp - remove qp from table
723 * @rdi: rvt dev struct
724 * @qp: qp to remove
725 *
726 * Remove the QP from the table so it can't be found asynchronously by
727 * the receive routine.
728 */
729 static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
730 {
731 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
732 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
733 unsigned long flags;
734 int removed = 1;
735
736 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
737
738 if (rcu_dereference_protected(rvp->qp[0],
739 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
740 RCU_INIT_POINTER(rvp->qp[0], NULL);
741 } else if (rcu_dereference_protected(rvp->qp[1],
742 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
743 RCU_INIT_POINTER(rvp->qp[1], NULL);
744 } else {
745 struct rvt_qp *q;
746 struct rvt_qp __rcu **qpp;
747
748 removed = 0;
749 qpp = &rdi->qp_dev->qp_table[n];
750 for (; (q = rcu_dereference_protected(*qpp,
751 lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
752 qpp = &q->next) {
753 if (q == qp) {
754 RCU_INIT_POINTER(*qpp,
755 rcu_dereference_protected(qp->next,
756 lockdep_is_held(&rdi->qp_dev->qpt_lock)));
757 removed = 1;
758 trace_rvt_qpremove(qp, n);
759 break;
760 }
761 }
762 }
763
764 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
765 if (removed) {
766 synchronize_rcu();
767 rvt_put_qp(qp);
768 }
769 }
770
771 /**
772 * rvt_alloc_rq - allocate memory for user or kernel buffer
773 * @rq: receive queue data structure
774 * @size: number of request queue entries
775 * @node: The NUMA node
776 * @udata: True if user data is available, false otherwise
777 *
778 * Return: 0 on success, or -ENOMEM if memory allocation fails.
779 * This function is used by both shared receive
780 * queues and non-shared receive queues to allocate
781 * memory.
782 */
783 int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
784 struct ib_udata *udata)
785 {
786 if (udata) {
787 rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
788 if (!rq->wq)
789 goto bail;
790 /* need kwq with no buffers */
791 rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
792 if (!rq->kwq)
793 goto bail;
794 rq->kwq->curr_wq = rq->wq->wq;
795 } else {
796 /* need kwq with buffers */
797 rq->kwq =
798 vzalloc_node(sizeof(struct rvt_krwq) + size, node);
799 if (!rq->kwq)
800 goto bail;
801 rq->kwq->curr_wq = rq->kwq->wq;
802 }
803
804 spin_lock_init(&rq->kwq->p_lock);
805 spin_lock_init(&rq->kwq->c_lock);
806 return 0;
807 bail:
808 rvt_free_rq(rq);
809 return -ENOMEM;
810 }
811
812 /**
813 * rvt_init_qp - initialize the QP state to the reset state
814 * @rdi: rvt dev struct
815 * @qp: the QP to init or reinit
816 * @type: the QP type
817 *
818 * This function is called from both rvt_create_qp() and
819 * rvt_reset_qp(). The difference is that the reset
820 * path takes the necessary locks to protect against concurrent
821 * access.
822 */
823 static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
824 enum ib_qp_type type)
825 {
826 qp->remote_qpn = 0;
827 qp->qkey = 0;
828 qp->qp_access_flags = 0;
829 qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
830 qp->s_hdrwords = 0;
831 qp->s_wqe = NULL;
832 qp->s_draining = 0;
833 qp->s_next_psn = 0;
834 qp->s_last_psn = 0;
835 qp->s_sending_psn = 0;
836 qp->s_sending_hpsn = 0;
837 qp->s_psn = 0;
838 qp->r_psn = 0;
839 qp->r_msn = 0;
840 if (type == IB_QPT_RC) {
841 qp->s_state = IB_OPCODE_RC_SEND_LAST;
842 qp->r_state = IB_OPCODE_RC_SEND_LAST;
843 } else {
844 qp->s_state = IB_OPCODE_UC_SEND_LAST;
845 qp->r_state = IB_OPCODE_UC_SEND_LAST;
846 }
847 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
848 qp->r_nak_state = 0;
849 qp->r_aflags = 0;
850 qp->r_flags = 0;
851 qp->s_head = 0;
852 qp->s_tail = 0;
853 qp->s_cur = 0;
854 qp->s_acked = 0;
855 qp->s_last = 0;
856 qp->s_ssn = 1;
857 qp->s_lsn = 0;
858 qp->s_mig_state = IB_MIG_MIGRATED;
859 qp->r_head_ack_queue = 0;
860 qp->s_tail_ack_queue = 0;
861 qp->s_acked_ack_queue = 0;
862 qp->s_num_rd_atomic = 0;
863 qp->r_sge.num_sge = 0;
864 atomic_set(&qp->s_reserved_used, 0);
865 }
866
867 /**
868 * _rvt_reset_qp - initialize the QP state to the reset state
869 * @rdi: rvt dev struct
870 * @qp: the QP to reset
871 * @type: the QP type
872 *
873 * r_lock, s_hlock, and s_lock are required to be held by the caller
874 */
875 static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
876 enum ib_qp_type type)
877 __must_hold(&qp->s_lock)
878 __must_hold(&qp->s_hlock)
879 __must_hold(&qp->r_lock)
880 {
881 lockdep_assert_held(&qp->r_lock);
882 lockdep_assert_held(&qp->s_hlock);
883 lockdep_assert_held(&qp->s_lock);
884 if (qp->state != IB_QPS_RESET) {
885 qp->state = IB_QPS_RESET;
886
887 /* Let drivers flush their waitlist */
888 rdi->driver_f.flush_qp_waiters(qp);
889 rvt_stop_rc_timers(qp);
890 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
891 spin_unlock(&qp->s_lock);
892 spin_unlock(&qp->s_hlock);
893 spin_unlock_irq(&qp->r_lock);
894
895 /* Stop the send queue and the retry timer */
896 rdi->driver_f.stop_send_queue(qp);
897 rvt_del_timers_sync(qp);
898 /* Wait for things to stop */
899 rdi->driver_f.quiesce_qp(qp);
900
901 /* take qp out the hash and wait for it to be unused */
902 rvt_remove_qp(rdi, qp);
903
904 /* grab the lock b/c it was locked at call time */
905 spin_lock_irq(&qp->r_lock);
906 spin_lock(&qp->s_hlock);
907 spin_lock(&qp->s_lock);
908
909 rvt_clear_mr_refs(qp, 1);
910 /*
911 * Let the driver do any tear down or re-init it needs to for
912 * a qp that has been reset
913 */
914 rdi->driver_f.notify_qp_reset(qp);
915 }
916 rvt_init_qp(rdi, qp, type);
917 lockdep_assert_held(&qp->r_lock);
918 lockdep_assert_held(&qp->s_hlock);
919 lockdep_assert_held(&qp->s_lock);
920 }
921
922 /**
923 * rvt_reset_qp - initialize the QP state to the reset state
924 * @rdi: the device info
925 * @qp: the QP to reset
926 * @type: the QP type
927 *
928 * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
929 * before calling _rvt_reset_qp().
930 */
931 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
932 enum ib_qp_type type)
933 {
934 spin_lock_irq(&qp->r_lock);
935 spin_lock(&qp->s_hlock);
936 spin_lock(&qp->s_lock);
937 _rvt_reset_qp(rdi, qp, type);
938 spin_unlock(&qp->s_lock);
939 spin_unlock(&qp->s_hlock);
940 spin_unlock_irq(&qp->r_lock);
941 }
942
943 /**
944 * rvt_free_qpn - Free a qpn from the bit map
945 * @qpt: QP table
946 * @qpn: queue pair number to free
947 */
948 static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
949 {
950 struct rvt_qpn_map *map;
951
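	/* AIP QPNs carry a prefix; strip it to recover the bitmap index */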
952 if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
953 qpn &= RVT_AIP_QP_SUFFIX;
954
955 map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
956 if (map->page)
957 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
958 }
959
960 /**
961 * get_allowed_ops - Given a QP type return the appropriate allowed OP
962 * @type: valid, supported, QP type
963 */
964 static u8 get_allowed_ops(enum ib_qp_type type)
965 {
966 return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ?
967 IB_OPCODE_UC : IB_OPCODE_UD;
968 }
969
970 /**
971 * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
972 * @qp: Valid QP with allowed_ops set
973 *
974 * The rvt_swqe data structure being used is a union, so this is
975 * only valid for UD QPs.
976 */
977 static void free_ud_wq_attr(struct rvt_qp *qp)
978 {
979 struct rvt_swqe *wqe;
980 int i;
981
982 for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
983 wqe = rvt_get_swqe_ptr(qp, i);
984 kfree(wqe->ud_wr.attr);
985 wqe->ud_wr.attr = NULL;
986 }
987 }
988
989 /**
990 * alloc_ud_wq_attr - AH attribute cache for UD QPs
991 * @qp: Valid QP with allowed_ops set
992 * @node: Numa node for allocation
993 *
994 * The rvt_swqe data structure being used is a union, so this is
995 * only valid for UD QPs.
996 */
997 static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
998 {
999 struct rvt_swqe *wqe;
1000 int i;
1001
1002 for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
1003 wqe = rvt_get_swqe_ptr(qp, i);
1004 wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
1005 GFP_KERNEL, node);
1006 if (!wqe->ud_wr.attr) {
1007 free_ud_wq_attr(qp);
1008 return -ENOMEM;
1009 }
1010 }
1011
1012 return 0;
1013 }
1014
1015 /**
1016 * rvt_create_qp - create a queue pair for a device
1017 * @ibqp: the queue pair
1018 * @init_attr: the attributes of the queue pair
1019 * @udata: user data for libibverbs.so
1020 *
1021 * Queue pair creation is mostly an rvt issue. However, drivers have their own
1022 * unique idea of what queue pair numbers mean. For instance there is a reserved
1023 * range for PSM.
1024 *
1025 * Return: 0 on success, otherwise returns an errno.
1026 *
1027 * Called by the ib_create_qp() core verbs function.
1028 */
1029 int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
1030 struct ib_udata *udata)
1031 {
1032 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1033 int ret = -ENOMEM;
1034 struct rvt_swqe *swq = NULL;
1035 size_t sz;
1036 size_t sg_list_sz = 0;
1037 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1038 void *priv = NULL;
1039 size_t sqsize;
1040 u8 exclude_prefix = 0;
1041
1042 if (!rdi)
1043 return -EINVAL;
1044
1045 if (init_attr->create_flags & ~IB_QP_CREATE_NETDEV_USE)
1046 return -EOPNOTSUPP;
1047
1048 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
1049 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr)
1050 return -EINVAL;
1051
1052 /* Check receive queue parameters if no SRQ is specified. */
1053 if (!init_attr->srq) {
1054 if (init_attr->cap.max_recv_sge >
1055 rdi->dparms.props.max_recv_sge ||
1056 init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
1057 return -EINVAL;
1058
1059 if (init_attr->cap.max_send_sge +
1060 init_attr->cap.max_send_wr +
1061 init_attr->cap.max_recv_sge +
1062 init_attr->cap.max_recv_wr == 0)
1063 return -EINVAL;
1064 }
1065 sqsize =
1066 init_attr->cap.max_send_wr + 1 +
1067 rdi->dparms.reserved_operations;
1068 switch (init_attr->qp_type) {
1069 case IB_QPT_SMI:
1070 case IB_QPT_GSI:
1071 if (init_attr->port_num == 0 ||
1072 init_attr->port_num > ibqp->device->phys_port_cnt)
1073 return -EINVAL;
1074 fallthrough;
1075 case IB_QPT_UC:
1076 case IB_QPT_RC:
1077 case IB_QPT_UD:
1078 sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
1079 swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
1080 if (!swq)
1081 return -ENOMEM;
1082
1083 if (init_attr->srq) {
1084 struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
1085
1086 if (srq->rq.max_sge > 1)
1087 sg_list_sz = sizeof(*qp->r_sg_list) *
1088 (srq->rq.max_sge - 1);
1089 } else if (init_attr->cap.max_recv_sge > 1)
1090 sg_list_sz = sizeof(*qp->r_sg_list) *
1091 (init_attr->cap.max_recv_sge - 1);
1092 qp->r_sg_list =
1093 kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node);
1094 if (!qp->r_sg_list)
1095 goto bail_qp;
1096 qp->allowed_ops = get_allowed_ops(init_attr->qp_type);
1097
1098 RCU_INIT_POINTER(qp->next, NULL);
1099 if (init_attr->qp_type == IB_QPT_RC) {
1100 qp->s_ack_queue =
1101 kcalloc_node(rvt_max_atomic(rdi),
1102 sizeof(*qp->s_ack_queue),
1103 GFP_KERNEL,
1104 rdi->dparms.node);
1105 if (!qp->s_ack_queue)
1106 goto bail_qp;
1107 }
1108 /* initialize timers needed for rc qp */
1109 timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
1110 hrtimer_setup(&qp->s_rnr_timer, rvt_rc_rnr_retry, CLOCK_MONOTONIC,
1111 HRTIMER_MODE_REL);
1112
1113 /*
1114 * Driver needs to set up its private QP structure and do any
1115 * initialization that is needed.
1116 */
1117 priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
1118 if (IS_ERR(priv)) {
1119 ret = PTR_ERR(priv);
1120 goto bail_qp;
1121 }
1122 qp->priv = priv;
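		/*
		 * The IB timeout field is an exponent: the local ACK timeout
		 * is 4.096 usec * 2^timeout, converted to jiffies here.
		 */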
1123 qp->timeout_jiffies =
1124 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1125 1000UL);
1126 if (init_attr->srq) {
1127 sz = 0;
1128 } else {
1129 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
1130 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
1131 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
1132 sizeof(struct rvt_rwqe);
1133 ret = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
1134 rdi->dparms.node, udata);
1135 if (ret)
1136 goto bail_driver_priv;
1137 }
1138
1139 /*
1140 * ib_create_qp() will initialize qp->ibqp
1141 * except for qp->ibqp.qp_num.
1142 */
1143 spin_lock_init(&qp->r_lock);
1144 spin_lock_init(&qp->s_hlock);
1145 spin_lock_init(&qp->s_lock);
1146 atomic_set(&qp->refcount, 0);
1147 atomic_set(&qp->local_ops_pending, 0);
1148 init_waitqueue_head(&qp->wait);
1149 INIT_LIST_HEAD(&qp->rspwait);
1150 qp->state = IB_QPS_RESET;
1151 qp->s_wq = swq;
1152 qp->s_size = sqsize;
1153 qp->s_avail = init_attr->cap.max_send_wr;
1154 qp->s_max_sge = init_attr->cap.max_send_sge;
1155 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
1156 qp->s_flags = RVT_S_SIGNAL_REQ_WR;
1157 ret = alloc_ud_wq_attr(qp, rdi->dparms.node);
1158 if (ret)
1159 goto bail_rq_rvt;
1160
1161 if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
1162 exclude_prefix = RVT_AIP_QP_PREFIX;
1163
1164 ret = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
1165 init_attr->qp_type,
1166 init_attr->port_num,
1167 exclude_prefix);
1168 if (ret < 0)
1169 goto bail_rq_wq;
1170
1171 qp->ibqp.qp_num = ret;
1172 if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
1173 qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
1174 qp->port_num = init_attr->port_num;
1175 rvt_init_qp(rdi, qp, init_attr->qp_type);
1176 if (rdi->driver_f.qp_priv_init) {
1177 ret = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
1178 if (ret)
1179 goto bail_rq_wq;
1180 }
1181 break;
1182
1183 default:
1184 /* Don't support raw QPs */
1185 return -EOPNOTSUPP;
1186 }
1187
1188 init_attr->cap.max_inline_data = 0;
1189
1190 /*
1191 * Return the address of the RWQ as the offset to mmap.
1192 * See rvt_mmap() for details.
1193 */
1194 if (udata && udata->outlen >= sizeof(__u64)) {
1195 if (!qp->r_rq.wq) {
1196 __u64 offset = 0;
1197
1198 ret = ib_copy_to_udata(udata, &offset,
1199 sizeof(offset));
1200 if (ret)
1201 goto bail_qpn;
1202 } else {
1203 u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
1204
1205 qp->ip = rvt_create_mmap_info(rdi, s, udata,
1206 qp->r_rq.wq);
1207 if (IS_ERR(qp->ip)) {
1208 ret = PTR_ERR(qp->ip);
1209 goto bail_qpn;
1210 }
1211
1212 ret = ib_copy_to_udata(udata, &qp->ip->offset,
1213 sizeof(qp->ip->offset));
1214 if (ret)
1215 goto bail_ip;
1216 }
1217 qp->pid = current->pid;
1218 }
1219
1220 spin_lock(&rdi->n_qps_lock);
1221 if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
1222 spin_unlock(&rdi->n_qps_lock);
1223 ret = -ENOMEM;
1224 goto bail_ip;
1225 }
1226
1227 rdi->n_qps_allocated++;
1228 /*
1229 * Maintain a busy_jiffies variable that will be added to the timeout
1230 * period in mod_retry_timer and add_retry_timer. This busy jiffies
1231 * is scaled by the number of rc qps created for the device to reduce
1232 * the number of timeouts occurring when there is a large number of
1233 * qps. busy_jiffies is incremented every rc qp scaling interval.
1234 * The scaling interval is selected based on extensive performance
1235 * evaluation of targeted workloads.
1236 */
1237 if (init_attr->qp_type == IB_QPT_RC) {
1238 rdi->n_rc_qps++;
1239 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1240 }
1241 spin_unlock(&rdi->n_qps_lock);
1242
1243 if (qp->ip) {
1244 spin_lock_irq(&rdi->pending_lock);
1245 list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
1246 spin_unlock_irq(&rdi->pending_lock);
1247 }
1248
1249 return 0;
1250
1251 bail_ip:
1252 if (qp->ip)
1253 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1254
1255 bail_qpn:
1256 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1257
1258 bail_rq_wq:
1259 free_ud_wq_attr(qp);
1260
1261 bail_rq_rvt:
1262 rvt_free_rq(&qp->r_rq);
1263
1264 bail_driver_priv:
1265 rdi->driver_f.qp_priv_free(rdi, qp);
1266
1267 bail_qp:
1268 kfree(qp->s_ack_queue);
1269 kfree(qp->r_sg_list);
1270 vfree(swq);
1271 return ret;
1272 }
1273
1274 /**
1275 * rvt_error_qp - put a QP into the error state
1276 * @qp: the QP to put into the error state
1277 * @err: the receive completion error to signal if a RWQE is active
1278 *
1279 * Flushes both send and receive work queues.
1280 *
1281 * Return: true if last WQE event should be generated.
1282 * The QP r_lock and s_lock should be held and interrupts disabled.
1283 * If we are already in error state, just return.
1284 */
1285 int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
1286 {
1287 struct ib_wc wc;
1288 int ret = 0;
1289 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1290
1291 lockdep_assert_held(&qp->r_lock);
1292 lockdep_assert_held(&qp->s_lock);
1293 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
1294 goto bail;
1295
1296 qp->state = IB_QPS_ERR;
1297
1298 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
1299 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
1300 timer_delete(&qp->s_timer);
1301 }
1302
1303 if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
1304 qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
1305
1306 rdi->driver_f.notify_error_qp(qp);
1307
1308 /* Schedule the sending tasklet to drain the send work queue. */
1309 if (READ_ONCE(qp->s_last) != qp->s_head)
1310 rdi->driver_f.schedule_send(qp);
1311
1312 rvt_clear_mr_refs(qp, 0);
1313
1314 memset(&wc, 0, sizeof(wc));
1315 wc.qp = &qp->ibqp;
1316 wc.opcode = IB_WC_RECV;
1317
1318 if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
1319 wc.wr_id = qp->r_wr_id;
1320 wc.status = err;
1321 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1322 }
1323 wc.status = IB_WC_WR_FLUSH_ERR;
1324
1325 if (qp->r_rq.kwq) {
1326 u32 head;
1327 u32 tail;
1328 struct rvt_rwq *wq = NULL;
1329 struct rvt_krwq *kwq = NULL;
1330
1331 spin_lock(&qp->r_rq.kwq->c_lock);
1332 /* qp->ip used to validate if there is a user buffer mmaped */
1333 if (qp->ip) {
1334 wq = qp->r_rq.wq;
1335 head = RDMA_READ_UAPI_ATOMIC(wq->head);
1336 tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
1337 } else {
1338 kwq = qp->r_rq.kwq;
1339 head = kwq->head;
1340 tail = kwq->tail;
1341 }
1342 /* sanity check pointers before trusting them */
1343 if (head >= qp->r_rq.size)
1344 head = 0;
1345 if (tail >= qp->r_rq.size)
1346 tail = 0;
1347 while (tail != head) {
1348 wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
1349 if (++tail >= qp->r_rq.size)
1350 tail = 0;
1351 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1352 }
1353 if (qp->ip)
1354 RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
1355 else
1356 kwq->tail = tail;
1357 spin_unlock(&qp->r_rq.kwq->c_lock);
1358 } else if (qp->ibqp.event_handler) {
1359 ret = 1;
1360 }
1361
1362 bail:
1363 return ret;
1364 }
1365 EXPORT_SYMBOL(rvt_error_qp);
1366
1367 /*
1368 * Put the QP into the hash table.
1369 * The hash table holds a reference to the QP.
1370 */
1371 static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
1372 {
1373 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
1374 unsigned long flags;
1375
1376 rvt_get_qp(qp);
1377 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
1378
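	/* QPNs 0 and 1 (SMI/GSI) live in per-port slots, not the hash table */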
1379 if (qp->ibqp.qp_num <= 1) {
1380 rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
1381 } else {
1382 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
1383
1384 qp->next = rdi->qp_dev->qp_table[n];
1385 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
1386 trace_rvt_qpinsert(qp, n);
1387 }
1388
1389 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
1390 }
1391
1392 /**
1393 * rvt_modify_qp - modify the attributes of a queue pair
1394 * @ibqp: the queue pair whose attributes we're modifying
1395 * @attr: the new attributes
1396 * @attr_mask: the mask of attributes to modify
1397 * @udata: user data for libibverbs.so
1398 *
1399 * Return: 0 on success, otherwise returns an errno.
1400 */
1401 int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1402 int attr_mask, struct ib_udata *udata)
1403 {
1404 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1405 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1406 enum ib_qp_state cur_state, new_state;
1407 struct ib_event ev;
1408 int lastwqe = 0;
1409 int mig = 0;
1410 int pmtu = 0; /* for gcc warning only */
1411 int opa_ah;
1412
1413 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1414 return -EOPNOTSUPP;
1415
1416 spin_lock_irq(&qp->r_lock);
1417 spin_lock(&qp->s_hlock);
1418 spin_lock(&qp->s_lock);
1419
1420 cur_state = attr_mask & IB_QP_CUR_STATE ?
1421 attr->cur_qp_state : qp->state;
1422 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1423 opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
1424
1425 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1426 attr_mask))
1427 goto inval;
1428
1429 if (rdi->driver_f.check_modify_qp &&
1430 rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
1431 goto inval;
1432
1433 if (attr_mask & IB_QP_AV) {
1434 if (opa_ah) {
1435 if (rdma_ah_get_dlid(&attr->ah_attr) >=
1436 opa_get_mcast_base(OPA_MCAST_NR))
1437 goto inval;
1438 } else {
1439 if (rdma_ah_get_dlid(&attr->ah_attr) >=
1440 be16_to_cpu(IB_MULTICAST_LID_BASE))
1441 goto inval;
1442 }
1443
1444 if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
1445 goto inval;
1446 }
1447
1448 if (attr_mask & IB_QP_ALT_PATH) {
1449 if (opa_ah) {
1450 if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1451 opa_get_mcast_base(OPA_MCAST_NR))
1452 goto inval;
1453 } else {
1454 if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1455 be16_to_cpu(IB_MULTICAST_LID_BASE))
1456 goto inval;
1457 }
1458
1459 if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
1460 goto inval;
1461 if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
1462 goto inval;
1463 }
1464
1465 if (attr_mask & IB_QP_PKEY_INDEX)
1466 if (attr->pkey_index >= rvt_get_npkeys(rdi))
1467 goto inval;
1468
1469 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1470 if (attr->min_rnr_timer > 31)
1471 goto inval;
1472
1473 if (attr_mask & IB_QP_PORT)
1474 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1475 qp->ibqp.qp_type == IB_QPT_GSI ||
1476 attr->port_num == 0 ||
1477 attr->port_num > ibqp->device->phys_port_cnt)
1478 goto inval;
1479
1480 if (attr_mask & IB_QP_DEST_QPN)
1481 if (attr->dest_qp_num > RVT_QPN_MASK)
1482 goto inval;
1483
1484 if (attr_mask & IB_QP_RETRY_CNT)
1485 if (attr->retry_cnt > 7)
1486 goto inval;
1487
1488 if (attr_mask & IB_QP_RNR_RETRY)
1489 if (attr->rnr_retry > 7)
1490 goto inval;
1491
1492 /*
1493 * Don't allow invalid path_mtu values. OK to set greater
1494 * than the active mtu (or even the max_cap, if we have tuned
1495 * that to a small mtu). We'll set qp->path_mtu
1496 * to the lesser of requested attribute mtu and active,
1497 * for packetizing messages.
1498 * Note that the QP port has to be set in INIT and MTU in RTR.
1499 */
1500 if (attr_mask & IB_QP_PATH_MTU) {
1501 pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1502 if (pmtu < 0)
1503 goto inval;
1504 }
1505
1506 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1507 if (attr->path_mig_state == IB_MIG_REARM) {
1508 if (qp->s_mig_state == IB_MIG_ARMED)
1509 goto inval;
1510 if (new_state != IB_QPS_RTS)
1511 goto inval;
1512 } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
1513 if (qp->s_mig_state == IB_MIG_REARM)
1514 goto inval;
1515 if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
1516 goto inval;
1517 if (qp->s_mig_state == IB_MIG_ARMED)
1518 mig = 1;
1519 } else {
1520 goto inval;
1521 }
1522 }
1523
1524 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1525 if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1526 goto inval;
1527
1528 switch (new_state) {
1529 case IB_QPS_RESET:
1530 if (qp->state != IB_QPS_RESET)
1531 _rvt_reset_qp(rdi, qp, ibqp->qp_type);
1532 break;
1533
1534 case IB_QPS_RTR:
1535 /* Allow event to re-trigger if QP set to RTR more than once */
1536 qp->r_flags &= ~RVT_R_COMM_EST;
1537 qp->state = new_state;
1538 break;
1539
1540 case IB_QPS_SQD:
1541 qp->s_draining = qp->s_last != qp->s_cur;
1542 qp->state = new_state;
1543 break;
1544
1545 case IB_QPS_SQE:
1546 if (qp->ibqp.qp_type == IB_QPT_RC)
1547 goto inval;
1548 qp->state = new_state;
1549 break;
1550
1551 case IB_QPS_ERR:
1552 lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1553 break;
1554
1555 default:
1556 qp->state = new_state;
1557 break;
1558 }
1559
1560 if (attr_mask & IB_QP_PKEY_INDEX)
1561 qp->s_pkey_index = attr->pkey_index;
1562
1563 if (attr_mask & IB_QP_PORT)
1564 qp->port_num = attr->port_num;
1565
1566 if (attr_mask & IB_QP_DEST_QPN)
1567 qp->remote_qpn = attr->dest_qp_num;
1568
1569 if (attr_mask & IB_QP_SQ_PSN) {
1570 qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1571 qp->s_psn = qp->s_next_psn;
1572 qp->s_sending_psn = qp->s_next_psn;
1573 qp->s_last_psn = qp->s_next_psn - 1;
1574 qp->s_sending_hpsn = qp->s_last_psn;
1575 }
1576
1577 if (attr_mask & IB_QP_RQ_PSN)
1578 qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1579
1580 if (attr_mask & IB_QP_ACCESS_FLAGS)
1581 qp->qp_access_flags = attr->qp_access_flags;
1582
1583 if (attr_mask & IB_QP_AV) {
1584 rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
1585 qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
1586 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1587 }
1588
1589 if (attr_mask & IB_QP_ALT_PATH) {
1590 rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
1591 qp->s_alt_pkey_index = attr->alt_pkey_index;
1592 }
1593
1594 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1595 qp->s_mig_state = attr->path_mig_state;
1596 if (mig) {
1597 qp->remote_ah_attr = qp->alt_ah_attr;
1598 qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
1599 qp->s_pkey_index = qp->s_alt_pkey_index;
1600 }
1601 }
1602
1603 if (attr_mask & IB_QP_PATH_MTU) {
1604 qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1605 qp->log_pmtu = ilog2(qp->pmtu);
1606 }
1607
1608 if (attr_mask & IB_QP_RETRY_CNT) {
1609 qp->s_retry_cnt = attr->retry_cnt;
1610 qp->s_retry = attr->retry_cnt;
1611 }
1612
1613 if (attr_mask & IB_QP_RNR_RETRY) {
1614 qp->s_rnr_retry_cnt = attr->rnr_retry;
1615 qp->s_rnr_retry = attr->rnr_retry;
1616 }
1617
1618 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1619 qp->r_min_rnr_timer = attr->min_rnr_timer;
1620
1621 if (attr_mask & IB_QP_TIMEOUT) {
1622 qp->timeout = attr->timeout;
1623 qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
1624 }
1625
1626 if (attr_mask & IB_QP_QKEY)
1627 qp->qkey = attr->qkey;
1628
1629 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1630 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1631
1632 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1633 qp->s_max_rd_atomic = attr->max_rd_atomic;
1634
1635 if (rdi->driver_f.modify_qp)
1636 rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1637
1638 spin_unlock(&qp->s_lock);
1639 spin_unlock(&qp->s_hlock);
1640 spin_unlock_irq(&qp->r_lock);
1641
1642 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1643 rvt_insert_qp(rdi, qp);
1644
1645 if (lastwqe) {
1646 ev.device = qp->ibqp.device;
1647 ev.element.qp = &qp->ibqp;
1648 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1649 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1650 }
1651 if (mig) {
1652 ev.device = qp->ibqp.device;
1653 ev.element.qp = &qp->ibqp;
1654 ev.event = IB_EVENT_PATH_MIG;
1655 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1656 }
1657 return 0;
1658
1659 inval:
1660 spin_unlock(&qp->s_lock);
1661 spin_unlock(&qp->s_hlock);
1662 spin_unlock_irq(&qp->r_lock);
1663 return -EINVAL;
1664 }
1665
1666 /**
1667 * rvt_destroy_qp - destroy a queue pair
1668 * @ibqp: the queue pair to destroy
1669 * @udata: unused by the driver
1670 *
1671 * Note that this can be called while the QP is actively sending or
1672 * receiving!
1673 *
1674 * Return: 0 on success.
1675 */
1676 int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1677 {
1678 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1679 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1680
1681 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1682
1683 wait_event(qp->wait, !atomic_read(&qp->refcount));
1684 /* qpn is now available for use again */
1685 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1686
1687 spin_lock(&rdi->n_qps_lock);
1688 rdi->n_qps_allocated--;
1689 if (qp->ibqp.qp_type == IB_QPT_RC) {
1690 rdi->n_rc_qps--;
1691 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1692 }
1693 spin_unlock(&rdi->n_qps_lock);
1694
1695 if (qp->ip)
1696 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1697 kvfree(qp->r_rq.kwq);
1698 rdi->driver_f.qp_priv_free(rdi, qp);
1699 kfree(qp->s_ack_queue);
1700 kfree(qp->r_sg_list);
1701 rdma_destroy_ah_attr(&qp->remote_ah_attr);
1702 rdma_destroy_ah_attr(&qp->alt_ah_attr);
1703 free_ud_wq_attr(qp);
1704 vfree(qp->s_wq);
1705 return 0;
1706 }
1707
1708 /**
1709 * rvt_query_qp - query an ibqp
1710 * @ibqp: IB qp to query
1711 * @attr: attr struct to fill in
1712 * @attr_mask: attr mask ignored
1713 * @init_attr: struct to fill in
1714 *
1715 * Return: always 0
1716 */
1717 int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1718 int attr_mask, struct ib_qp_init_attr *init_attr)
1719 {
1720 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1721 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1722
1723 attr->qp_state = qp->state;
1724 attr->cur_qp_state = attr->qp_state;
1725 attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1726 attr->path_mig_state = qp->s_mig_state;
1727 attr->qkey = qp->qkey;
1728 attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
1729 attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
1730 attr->dest_qp_num = qp->remote_qpn;
1731 attr->qp_access_flags = qp->qp_access_flags;
1732 attr->cap.max_send_wr = qp->s_size - 1 -
1733 rdi->dparms.reserved_operations;
1734 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
1735 attr->cap.max_send_sge = qp->s_max_sge;
1736 attr->cap.max_recv_sge = qp->r_rq.max_sge;
1737 attr->cap.max_inline_data = 0;
1738 attr->ah_attr = qp->remote_ah_attr;
1739 attr->alt_ah_attr = qp->alt_ah_attr;
1740 attr->pkey_index = qp->s_pkey_index;
1741 attr->alt_pkey_index = qp->s_alt_pkey_index;
1742 attr->en_sqd_async_notify = 0;
1743 attr->sq_draining = qp->s_draining;
1744 attr->max_rd_atomic = qp->s_max_rd_atomic;
1745 attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
1746 attr->min_rnr_timer = qp->r_min_rnr_timer;
1747 attr->port_num = qp->port_num;
1748 attr->timeout = qp->timeout;
1749 attr->retry_cnt = qp->s_retry_cnt;
1750 attr->rnr_retry = qp->s_rnr_retry_cnt;
1751 attr->alt_port_num =
1752 rdma_ah_get_port_num(&qp->alt_ah_attr);
1753 attr->alt_timeout = qp->alt_timeout;
1754
1755 init_attr->event_handler = qp->ibqp.event_handler;
1756 init_attr->qp_context = qp->ibqp.qp_context;
1757 init_attr->send_cq = qp->ibqp.send_cq;
1758 init_attr->recv_cq = qp->ibqp.recv_cq;
1759 init_attr->srq = qp->ibqp.srq;
1760 init_attr->cap = attr->cap;
1761 if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
1762 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
1763 else
1764 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
1765 init_attr->qp_type = qp->ibqp.qp_type;
1766 init_attr->port_num = qp->port_num;
1767 return 0;
1768 }
1769
1770 /**
1771 * rvt_post_recv - post a receive on a QP
1772 * @ibqp: the QP to post the receive on
1773 * @wr: the WR to post
1774 * @bad_wr: the first bad WR is put here
1775 *
1776 * This may be called from interrupt context.
1777 *
1778 * Return: 0 on success otherwise errno
1779 */
1780 int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1781 const struct ib_recv_wr **bad_wr)
1782 {
1783 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1784 struct rvt_krwq *wq = qp->r_rq.kwq;
1785 unsigned long flags;
1786 int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
1787 !qp->ibqp.srq;
1788
1789 /* Check that state is OK to post receive. */
1790 if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1791 *bad_wr = wr;
1792 return -EINVAL;
1793 }
1794
1795 for (; wr; wr = wr->next) {
1796 struct rvt_rwqe *wqe;
1797 u32 next;
1798 int i;
1799
1800 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1801 *bad_wr = wr;
1802 return -EINVAL;
1803 }
1804
1805 spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags);
1806 next = wq->head + 1;
1807 if (next >= qp->r_rq.size)
1808 next = 0;
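		/* the queue is full if advancing head would catch the tail */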
1809 if (next == READ_ONCE(wq->tail)) {
1810 spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1811 *bad_wr = wr;
1812 return -ENOMEM;
1813 }
1814 if (unlikely(qp_err_flush)) {
1815 struct ib_wc wc;
1816
1817 memset(&wc, 0, sizeof(wc));
1818 wc.qp = &qp->ibqp;
1819 wc.opcode = IB_WC_RECV;
1820 wc.wr_id = wr->wr_id;
1821 wc.status = IB_WC_WR_FLUSH_ERR;
1822 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1823 } else {
1824 wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1825 wqe->wr_id = wr->wr_id;
1826 wqe->num_sge = wr->num_sge;
1827 for (i = 0; i < wr->num_sge; i++) {
1828 wqe->sg_list[i].addr = wr->sg_list[i].addr;
1829 wqe->sg_list[i].length = wr->sg_list[i].length;
1830 wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
1831 }
1832 /*
1833 * Make sure queue entry is written
1834 * before the head index.
1835 */
1836 smp_store_release(&wq->head, next);
1837 }
1838 spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1839 }
1840 return 0;
1841 }
1842
1843 /**
1844 * rvt_qp_valid_operation - validate post send wr request
1845 * @qp: the qp
1846 * @post_parms: the post send table for the driver
1847 * @wr: the work request
1848 *
1849 * The routine validates the operation based on the
1850 * validation table and returns the length of the operation,
1851 * which can extend beyond the ib_send_wr. Operation
1852 * dependent flags key atomic operation validation.
1853 *
1854 * There is an exception for UD qps that validates the pd and
1855 * overrides the length to include the additional UD specific
1856 * length.
1857 *
1858 * Returns a negative error or the length of the work request
1859 * for building the swqe.
1860 */
1861 static inline int rvt_qp_valid_operation(
1862 struct rvt_qp *qp,
1863 const struct rvt_operation_params *post_parms,
1864 const struct ib_send_wr *wr)
1865 {
1866 int len;
1867
1868 if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
1869 return -EINVAL;
1870 if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
1871 return -EINVAL;
1872 if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
1873 ibpd_to_rvtpd(qp->ibqp.pd)->user)
1874 return -EINVAL;
1875 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
1876 (wr->num_sge == 0 ||
1877 wr->sg_list[0].length < sizeof(u64) ||
1878 wr->sg_list[0].addr & (sizeof(u64) - 1)))
1879 return -EINVAL;
1880 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
1881 !qp->s_max_rd_atomic)
1882 return -EINVAL;
1883 len = post_parms[wr->opcode].length;
1884 /* UD specific */
1885 if (qp->ibqp.qp_type != IB_QPT_UC &&
1886 qp->ibqp.qp_type != IB_QPT_RC) {
1887 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1888 return -EINVAL;
1889 len = sizeof(struct ib_ud_wr);
1890 }
1891 return len;
1892 }
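/*
 * Illustrative sketch (compiled out): roughly how a driver's post send
 * table might describe two operations for the validation above.  The
 * real table lives in the driver, not in rdmavt; the entries and field
 * values below are assumptions made only for the example.
 */
#if 0
static const struct rvt_operation_params example_post_parms[RVT_OPERATION_MAX] = {
	[IB_WR_RDMA_WRITE] = {
		.length = sizeof(struct ib_rdma_wr),
		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},
	[IB_WR_ATOMIC_FETCH_AND_ADD] = {
		.length = sizeof(struct ib_atomic_wr),
		.qpt_support = BIT(IB_QPT_RC),
		/* keys the ATOMIC/ATOMIC_SGE checks in rvt_qp_valid_operation() */
		.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
	},
};
#endif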
1893
1894 /**
1895 * rvt_qp_is_avail - determine queue capacity
1896 * @qp: the qp
1897 * @rdi: the rdmavt device
1898 * @reserved_op: is reserved operation
1899 *
1900 * This assumes the s_hlock is held but the s_last
1901 * qp variable is uncontrolled.
1902 *
1903 * For non reserved operations, the qp->s_avail
1904 * may be changed.
1905 *
1906 * The return value is zero or -ENOMEM.
1907 */
1908 static inline int rvt_qp_is_avail(
1909 struct rvt_qp *qp,
1910 struct rvt_dev_info *rdi,
1911 bool reserved_op)
1912 {
1913 u32 slast;
1914 u32 avail;
1915 u32 reserved_used;
1916
1917 /* see rvt_qp_wqe_unreserve() */
1918 smp_mb__before_atomic();
1919 if (unlikely(reserved_op)) {
1920 /* see rvt_qp_wqe_unreserve() */
1921 reserved_used = atomic_read(&qp->s_reserved_used);
1922 if (reserved_used >= rdi->dparms.reserved_operations)
1923 return -ENOMEM;
1924 return 0;
1925 }
1926 /* non-reserved operations */
1927 if (likely(qp->s_avail))
1928 return 0;
1929 /* See rvt_qp_complete_swqe() */
1930 slast = smp_load_acquire(&qp->s_last);
1931 if (qp->s_head >= slast)
1932 avail = qp->s_size - (qp->s_head - slast);
1933 else
1934 avail = slast - qp->s_head;
1935
1936 reserved_used = atomic_read(&qp->s_reserved_used);
1937 avail = avail - 1 -
1938 (rdi->dparms.reserved_operations - reserved_used);
1939 /* ensure we don't assign a negative s_avail */
1940 if ((s32)avail <= 0)
1941 return -ENOMEM;
1942 qp->s_avail = avail;
1943 if (WARN_ON(qp->s_avail >
1944 (qp->s_size - 1 - rdi->dparms.reserved_operations)))
1945 rvt_pr_err(rdi,
1946 "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
1947 qp->ibqp.qp_num, qp->s_size, qp->s_avail,
1948 qp->s_head, qp->s_tail, qp->s_cur,
1949 qp->s_acked, qp->s_last);
1950 return 0;
1951 }
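/*
 * Worked example of the availability math above (numbers are made up):
 * with s_size = 32, s_head = 30, s_last = 5 and 2 reserved operations,
 * none of which are in use, the ring holds 32 - (30 - 5) = 7 free
 * slots; one slot is always kept back to distinguish full from empty
 * and 2 are held for reserved operations, so s_avail becomes
 * 7 - 1 - (2 - 0) = 4 postable entries.
 */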
1952
1953 /**
1954 * rvt_post_one_wr - post one RC, UC, or UD send work request
1955 * @qp: the QP to post on
1956 * @wr: the work request to send
1957 * @call_send: kick the send engine into gear
1958 */
1959 static int rvt_post_one_wr(struct rvt_qp *qp,
1960 const struct ib_send_wr *wr,
1961 bool *call_send)
1962 {
1963 struct rvt_swqe *wqe;
1964 u32 next;
1965 int i;
1966 int j;
1967 int acc;
1968 struct rvt_lkey_table *rkt;
1969 struct rvt_pd *pd;
1970 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1971 u8 log_pmtu;
1972 int ret;
1973 size_t cplen;
1974 bool reserved_op;
1975 int local_ops_delayed = 0;
1976
1977 BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
1978
1979 /* IB spec says that num_sge == 0 is OK. */
1980 if (unlikely(wr->num_sge > qp->s_max_sge))
1981 return -EINVAL;
1982
1983 ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
1984 if (ret < 0)
1985 return ret;
1986 cplen = ret;
1987
1988 /*
1989 * Local operations include fast register and local invalidate.
1990 * Fast register needs to be processed immediately because the
1991 * registered lkey may be used by following work requests and the
1992 * lkey needs to be valid at the time those requests are posted.
1993 * Local invalidate can be processed immediately if fencing is
1994 * not required and no previous local invalidate ops are pending.
1995 * Signaled local operations that have been processed immediately
1996 * need to have requests with "completion only" flags set posted
1997 * to the send queue in order to generate completions.
1998 */
1999 if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
2000 switch (wr->opcode) {
2001 case IB_WR_REG_MR:
2002 ret = rvt_fast_reg_mr(qp,
2003 reg_wr(wr)->mr,
2004 reg_wr(wr)->key,
2005 reg_wr(wr)->access);
2006 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2007 return ret;
2008 break;
2009 case IB_WR_LOCAL_INV:
2010 if ((wr->send_flags & IB_SEND_FENCE) ||
2011 atomic_read(&qp->local_ops_pending)) {
2012 local_ops_delayed = 1;
2013 } else {
2014 ret = rvt_invalidate_rkey(
2015 qp, wr->ex.invalidate_rkey);
2016 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2017 return ret;
2018 }
2019 break;
2020 default:
2021 return -EINVAL;
2022 }
2023 }
2024
2025 reserved_op = rdi->post_parms[wr->opcode].flags &
2026 RVT_OPERATION_USE_RESERVE;
2027 /* check for avail */
2028 ret = rvt_qp_is_avail(qp, rdi, reserved_op);
2029 if (ret)
2030 return ret;
2031 next = qp->s_head + 1;
2032 if (next >= qp->s_size)
2033 next = 0;
2034
2035 rkt = &rdi->lkey_table;
2036 pd = ibpd_to_rvtpd(qp->ibqp.pd);
2037 wqe = rvt_get_swqe_ptr(qp, qp->s_head);
2038
2039 /* cplen has length from above */
2040 memcpy(&wqe->ud_wr, wr, cplen);
2041
2042 wqe->length = 0;
2043 j = 0;
2044 if (wr->num_sge) {
2045 struct rvt_sge *last_sge = NULL;
2046
2047 acc = wr->opcode >= IB_WR_RDMA_READ ?
2048 IB_ACCESS_LOCAL_WRITE : 0;
2049 for (i = 0; i < wr->num_sge; i++) {
2050 u32 length = wr->sg_list[i].length;
2051
2052 if (length == 0)
2053 continue;
2054 ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
2055 &wr->sg_list[i], acc);
2056 if (unlikely(ret < 0))
2057 goto bail_inval_free;
2058 wqe->length += length;
2059 if (ret)
2060 last_sge = &wqe->sg_list[j];
2061 j += ret;
2062 }
2063 wqe->wr.num_sge = j;
2064 }
2065
2066 /*
2067 * Calculate and set SWQE PSN values prior to handing it off
2068 * to the driver's check routine. This gives the driver the
2069 * opportunity to adjust PSN values based on internal checks.
2070 */
2071 log_pmtu = qp->log_pmtu;
2072 if (qp->allowed_ops == IB_OPCODE_UD) {
2073 struct rvt_ah *ah = rvt_get_swqe_ah(wqe);
2074
2075 log_pmtu = ah->log_pmtu;
2076 rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
2077 }
2078
2079 if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
2080 if (local_ops_delayed)
2081 atomic_inc(&qp->local_ops_pending);
2082 else
2083 wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
2084 wqe->ssn = 0;
2085 wqe->psn = 0;
2086 wqe->lpsn = 0;
2087 } else {
2088 wqe->ssn = qp->s_ssn++;
2089 wqe->psn = qp->s_next_psn;
2090 wqe->lpsn = wqe->psn +
2091 (wqe->length ?
2092 ((wqe->length - 1) >> log_pmtu) :
2093 0);
2094 }
2095
2096 /* general part of wqe valid - allow for driver checks */
2097 if (rdi->driver_f.setup_wqe) {
2098 ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
2099 if (ret < 0)
2100 goto bail_inval_free_ref;
2101 }
2102
2103 if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
2104 qp->s_next_psn = wqe->lpsn + 1;
2105
2106 if (unlikely(reserved_op)) {
2107 wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
2108 rvt_qp_wqe_reserve(qp, wqe);
2109 } else {
2110 wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
2111 qp->s_avail--;
2112 }
2113 trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
2114 smp_wmb(); /* see request builders */
2115 qp->s_head = next;
2116
2117 return 0;
2118
2119 bail_inval_free_ref:
2120 if (qp->allowed_ops == IB_OPCODE_UD)
2121 rdma_destroy_ah_attr(wqe->ud_wr.attr);
2122 bail_inval_free:
2123 /* release mr holds */
2124 while (j) {
2125 struct rvt_sge *sge = &wqe->sg_list[--j];
2126
2127 rvt_put_mr(sge->mr);
2128 }
2129 return ret;
2130 }
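/*
 * Worked example of the PSN span computed above (values are made up):
 * with a 4096-byte PMTU (log_pmtu = 12), a 10000-byte send starting at
 * psn = 100 needs three packets, so lpsn = 100 + ((10000 - 1) >> 12)
 * = 102 and s_next_psn advances to 103 for the following request.
 */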
2131
2132 /**
2133 * rvt_post_send - post a send on a QP
2134 * @ibqp: the QP to post the send on
2135 * @wr: the list of work requests to post
2136 * @bad_wr: the first bad WR is put here
2137 *
2138 * This may be called from interrupt context.
2139 *
2140 * Return: 0 on success else errno
2141 */
2142 int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2143 const struct ib_send_wr **bad_wr)
2144 {
2145 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
2146 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2147 unsigned long flags = 0;
2148 bool call_send;
2149 unsigned nreq = 0;
2150 int err = 0;
2151
2152 spin_lock_irqsave(&qp->s_hlock, flags);
2153
2154 /*
2155 * Ensure QP state is such that we can send. If not bail out early,
2156 * there is no need to do this every time we post a send.
2157 */
2158 if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
2159 spin_unlock_irqrestore(&qp->s_hlock, flags);
2160 return -EINVAL;
2161 }
2162
2163 /*
2164 * If the send queue is empty and we only have a single WR, then just go
2165 * ahead and kick the send engine into gear. Otherwise we will always
2166 * just schedule the send to happen later.
2167 */
2168 call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
2169
2170 for (; wr; wr = wr->next) {
2171 err = rvt_post_one_wr(qp, wr, &call_send);
2172 if (unlikely(err)) {
2173 *bad_wr = wr;
2174 goto bail;
2175 }
2176 nreq++;
2177 }
2178 bail:
2179 spin_unlock_irqrestore(&qp->s_hlock, flags);
2180 if (nreq) {
2181 /*
2182 * Only call do_send if there is exactly one work request
2183 * and the driver said it was OK.
2184 */
2185 if (nreq == 1 && call_send)
2186 rdi->driver_f.do_send(qp);
2187 else
2188 rdi->driver_f.schedule_send_no_lock(qp);
2189 }
2190 return err;
2191 }
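/*
 * Illustrative sketch (compiled out): a kernel consumer posting a
 * single signaled SEND that ends up in rvt_post_send() above.  The
 * qp, buffer, lkey and wr_id are assumptions made for the example.
 */
#if 0
static int example_post_one_send(struct ib_qp *qp, u64 dma_addr, u32 len,
				 u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr = dma_addr,
		.length = len,
		.lkey = lkey,
	};
	struct ib_send_wr wr = {
		.wr_id = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
		.opcode = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,	/* request a completion */
	};
	const struct ib_send_wr *bad_wr;

	/* A single WR on an idle queue lets rvt_post_send() call do_send(). */
	return ib_post_send(qp, &wr, &bad_wr);
}
#endif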
2192
2193 /**
2194 * rvt_post_srq_recv - post a receive on a shared receive queue
2195 * @ibsrq: the SRQ to post the receive on
2196 * @wr: the list of work requests to post
2197 * @bad_wr: A pointer to the first WR to cause a problem is put here
2198 *
2199 * This may be called from interrupt context.
2200 *
2201 * Return: 0 on success else errno
2202 */
2203 int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2204 const struct ib_recv_wr **bad_wr)
2205 {
2206 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
2207 struct rvt_krwq *wq;
2208 unsigned long flags;
2209
2210 for (; wr; wr = wr->next) {
2211 struct rvt_rwqe *wqe;
2212 u32 next;
2213 int i;
2214
2215 if ((unsigned)wr->num_sge > srq->rq.max_sge) {
2216 *bad_wr = wr;
2217 return -EINVAL;
2218 }
2219
2220 spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
2221 wq = srq->rq.kwq;
2222 next = wq->head + 1;
2223 if (next >= srq->rq.size)
2224 next = 0;
2225 if (next == READ_ONCE(wq->tail)) {
2226 spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2227 *bad_wr = wr;
2228 return -ENOMEM;
2229 }
2230
2231 wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
2232 wqe->wr_id = wr->wr_id;
2233 wqe->num_sge = wr->num_sge;
2234 for (i = 0; i < wr->num_sge; i++) {
2235 wqe->sg_list[i].addr = wr->sg_list[i].addr;
2236 wqe->sg_list[i].length = wr->sg_list[i].length;
2237 wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
2238 }
2239 /* Make sure queue entry is written before the head index. */
2240 smp_store_release(&wq->head, next);
2241 spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2242 }
2243 return 0;
2244 }
2245
2246 /*
2247 * rvt uses the internal kernel struct as part of its ABI; for now, make sure
2248 * the kernel struct does not change layout. FIXME: rvt should never cast the
2249 * user struct to a kernel struct.
2250 */
2251 static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge)
2252 {
2253 BUILD_BUG_ON(offsetof(struct ib_sge, addr) !=
2254 offsetof(struct rvt_wqe_sge, addr));
2255 BUILD_BUG_ON(offsetof(struct ib_sge, length) !=
2256 offsetof(struct rvt_wqe_sge, length));
2257 BUILD_BUG_ON(offsetof(struct ib_sge, lkey) !=
2258 offsetof(struct rvt_wqe_sge, lkey));
2259 return (struct ib_sge *)sge;
2260 }
2261
2262 /*
2263 * Validate a RWQE and fill in the SGE state.
2264 * Return 1 if OK.
2265 */
2266 static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
2267 {
2268 int i, j, ret;
2269 struct ib_wc wc;
2270 struct rvt_lkey_table *rkt;
2271 struct rvt_pd *pd;
2272 struct rvt_sge_state *ss;
2273 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2274
2275 rkt = &rdi->lkey_table;
2276 pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
2277 ss = &qp->r_sge;
2278 ss->sg_list = qp->r_sg_list;
2279 qp->r_len = 0;
2280 for (i = j = 0; i < wqe->num_sge; i++) {
2281 if (wqe->sg_list[i].length == 0)
2282 continue;
2283 /* Check LKEY */
2284 ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
2285 NULL, rvt_cast_sge(&wqe->sg_list[i]),
2286 IB_ACCESS_LOCAL_WRITE);
2287 if (unlikely(ret <= 0))
2288 goto bad_lkey;
2289 qp->r_len += wqe->sg_list[i].length;
2290 j++;
2291 }
2292 ss->num_sge = j;
2293 ss->total_len = qp->r_len;
2294 return 1;
2295
2296 bad_lkey:
2297 while (j) {
2298 struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
2299
2300 rvt_put_mr(sge->mr);
2301 }
2302 ss->num_sge = 0;
2303 memset(&wc, 0, sizeof(wc));
2304 wc.wr_id = wqe->wr_id;
2305 wc.status = IB_WC_LOC_PROT_ERR;
2306 wc.opcode = IB_WC_RECV;
2307 wc.qp = &qp->ibqp;
2308 /* Signal solicited completion event. */
2309 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
2310 return 0;
2311 }
2312
2313 /**
2314 * get_rvt_head - get the head index of the circular buffer
2315 * @rq: data structure for request queue entry
2316 * @ip: the mmap info pointer, non-NULL when the queue is user mapped
2317 *
2318 * Return - head index value
2319 */
2320 static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
2321 {
2322 u32 head;
2323
2324 if (ip)
2325 head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
2326 else
2327 head = rq->kwq->head;
2328
2329 return head;
2330 }
2331
2332 /**
2333 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
2334 * @qp: the QP
2335 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
2336 *
2337 * Return -1 if there is a local error, 0 if no RWQE is available,
2338 * otherwise return 1.
2339 *
2340 * Can be called from interrupt level.
2341 */
2342 int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
2343 {
2344 unsigned long flags;
2345 struct rvt_rq *rq;
2346 struct rvt_krwq *kwq = NULL;
2347 struct rvt_rwq *wq;
2348 struct rvt_srq *srq;
2349 struct rvt_rwqe *wqe;
2350 void (*handler)(struct ib_event *, void *);
2351 u32 tail;
2352 u32 head;
2353 int ret;
2354 void *ip = NULL;
2355
2356 if (qp->ibqp.srq) {
2357 srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
2358 handler = srq->ibsrq.event_handler;
2359 rq = &srq->rq;
2360 ip = srq->ip;
2361 } else {
2362 srq = NULL;
2363 handler = NULL;
2364 rq = &qp->r_rq;
2365 ip = qp->ip;
2366 }
2367
2368 spin_lock_irqsave(&rq->kwq->c_lock, flags);
2369 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2370 ret = 0;
2371 goto unlock;
2372 }
2373 kwq = rq->kwq;
2374 if (ip) {
2375 wq = rq->wq;
2376 tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
2377 } else {
2378 tail = kwq->tail;
2379 }
2380
2381 /* Validate tail before using it since it is user writable. */
2382 if (tail >= rq->size)
2383 tail = 0;
2384
2385 if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
2386 head = get_rvt_head(rq, ip);
2387 kwq->count = rvt_get_rq_count(rq, head, tail);
2388 }
2389 if (unlikely(kwq->count == 0)) {
2390 ret = 0;
2391 goto unlock;
2392 }
2393 /* Make sure entry is read after the count is read. */
2394 smp_rmb();
2395 wqe = rvt_get_rwqe_ptr(rq, tail);
2396 /*
2397 * Even though we update the tail index in memory, the verbs
2398 * consumer is not supposed to post more entries until a
2399 * completion is generated.
2400 */
2401 if (++tail >= rq->size)
2402 tail = 0;
2403 if (ip)
2404 RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
2405 else
2406 kwq->tail = tail;
2407 if (!wr_id_only && !init_sge(qp, wqe)) {
2408 ret = -1;
2409 goto unlock;
2410 }
2411 qp->r_wr_id = wqe->wr_id;
2412
2413 kwq->count--;
2414 ret = 1;
2415 set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
2416 if (handler) {
2417 /*
2418 * Validate head pointer value and compute
2419 * the number of remaining WQEs.
2420 */
2421 if (kwq->count < srq->limit) {
2422 kwq->count =
2423 rvt_get_rq_count(rq,
2424 get_rvt_head(rq, ip), tail);
2425 if (kwq->count < srq->limit) {
2426 struct ib_event ev;
2427
2428 srq->limit = 0;
2429 spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2430 ev.device = qp->ibqp.device;
2431 ev.element.srq = qp->ibqp.srq;
2432 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
2433 handler(&ev, srq->ibsrq.srq_context);
2434 goto bail;
2435 }
2436 }
2437 }
2438 unlock:
2439 spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2440 bail:
2441 return ret;
2442 }
2443 EXPORT_SYMBOL(rvt_get_rwqe);
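/*
 * The occupancy check in rvt_get_rwqe() relies on the usual circular
 * buffer arithmetic: head == tail means empty, and the count of
 * posted-but-unconsumed entries is head - tail, wrapped by rq->size
 * when head has already wrapped around.  rvt_get_rq_count() itself
 * lives in the rdmavt headers; the snippet below (compiled out) is
 * only an illustration of that computation, not the authoritative
 * implementation.
 */
#if 0
static u32 example_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
{
	u32 count = head - tail;	/* may wrap below zero ... */

	if ((s32)count < 0)
		count += rq->size;	/* ... so add the ring size back */
	return count;
}
#endif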
2444
2445 /**
2446 * rvt_comm_est - handle trap with QP established
2447 * @qp: the QP
2448 */
2449 void rvt_comm_est(struct rvt_qp *qp)
2450 {
2451 qp->r_flags |= RVT_R_COMM_EST;
2452 if (qp->ibqp.event_handler) {
2453 struct ib_event ev;
2454
2455 ev.device = qp->ibqp.device;
2456 ev.element.qp = &qp->ibqp;
2457 ev.event = IB_EVENT_COMM_EST;
2458 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2459 }
2460 }
2461 EXPORT_SYMBOL(rvt_comm_est);
2462
2463 void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
2464 {
2465 unsigned long flags;
2466 int lastwqe;
2467
2468 spin_lock_irqsave(&qp->s_lock, flags);
2469 lastwqe = rvt_error_qp(qp, err);
2470 spin_unlock_irqrestore(&qp->s_lock, flags);
2471
2472 if (lastwqe) {
2473 struct ib_event ev;
2474
2475 ev.device = qp->ibqp.device;
2476 ev.element.qp = &qp->ibqp;
2477 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
2478 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2479 }
2480 }
2481 EXPORT_SYMBOL(rvt_rc_error);
2482
2483 /*
2484 * rvt_rnr_tbl_to_usec - convert an RNR timeout code to microseconds
2485 * @index - the index into ib_rvt_rnr_table
2486 * return usec for an index into ib_rvt_rnr_table
2487 */
2488 unsigned long rvt_rnr_tbl_to_usec(u32 index)
2489 {
2490 return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
2491 }
2492 EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
2493
2494 static inline unsigned long rvt_aeth_to_usec(u32 aeth)
2495 {
2496 return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
2497 IB_AETH_CREDIT_MASK];
2498 }
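/*
 * Worked example of the AETH decode above, assuming the usual header
 * definitions (an IB_AETH_CREDIT_SHIFT of 24 and a 5-bit credit mask):
 * the RNR NAK timer code sits in the low five bits of the AETH
 * syndrome byte, so an AETH of 0x0d000000 selects code 0x0d and the
 * matching ib_rvt_rnr_table entry is returned in microseconds.
 */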
2499
2500 /*
2501 * rvt_add_retry_timer_ext - add/start a retry timer
2502 * @qp - the QP
2503 * @shift - timeout shift to wait for multiple packets
2504 * add a retry timer on the QP
2505 */
2506 void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
2507 {
2508 struct ib_qp *ibqp = &qp->ibqp;
2509 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2510
2511 lockdep_assert_held(&qp->s_lock);
2512 qp->s_flags |= RVT_S_TIMER;
2513 /* 4.096 usec. * (1 << qp->timeout) */
2514 qp->s_timer.expires = jiffies + rdi->busy_jiffies +
2515 (qp->timeout_jiffies << shift);
2516 add_timer(&qp->s_timer);
2517 }
2518 EXPORT_SYMBOL(rvt_add_retry_timer_ext);
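/*
 * Worked example of the expiry above (all numbers are made up): with
 * timeout_jiffies around 67 (roughly the 4.096 usec * 2^16 case at
 * HZ=250), busy_jiffies of 0 and shift = 1 to cover two outstanding
 * packets, the retry timer fires roughly 134 jiffies after "now".
 */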
2519
2520 /**
2521 * rvt_add_rnr_timer - add/start an rnr timer on the QP
2522 * @qp: the QP
2523 * @aeth: aeth of RNR timeout, simulated aeth for loopback
2524 */
2525 void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
2526 {
2527 u32 to;
2528
2529 lockdep_assert_held(&qp->s_lock);
2530 qp->s_flags |= RVT_S_WAIT_RNR;
2531 to = rvt_aeth_to_usec(aeth);
2532 trace_rvt_rnrnak_add(qp, to);
2533 hrtimer_start(&qp->s_rnr_timer,
2534 ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
2535 }
2536 EXPORT_SYMBOL(rvt_add_rnr_timer);
2537
2538 /**
2539 * rvt_stop_rc_timers - stop all timers
2540 * @qp: the QP
2541 * stop any pending timers
2542 */
2543 void rvt_stop_rc_timers(struct rvt_qp *qp)
2544 {
2545 lockdep_assert_held(&qp->s_lock);
2546 /* Remove QP from all timers */
2547 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
2548 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
2549 timer_delete(&qp->s_timer);
2550 hrtimer_try_to_cancel(&qp->s_rnr_timer);
2551 }
2552 }
2553 EXPORT_SYMBOL(rvt_stop_rc_timers);
2554
2555 /**
2556 * rvt_stop_rnr_timer - stop an rnr timer
2557 * @qp: the QP
2558 *
2559 * clear the RVT_S_WAIT_RNR flag if an rnr
2560 * timer had been pending.
2561 */
2562 static void rvt_stop_rnr_timer(struct rvt_qp *qp)
2563 {
2564 lockdep_assert_held(&qp->s_lock);
2565 /* Remove QP from rnr timer */
2566 if (qp->s_flags & RVT_S_WAIT_RNR) {
2567 qp->s_flags &= ~RVT_S_WAIT_RNR;
2568 trace_rvt_rnrnak_stop(qp, 0);
2569 }
2570 }
2571
2572 /**
2573 * rvt_del_timers_sync - wait for any timeout routines to exit
2574 * @qp: the QP
2575 */
2576 void rvt_del_timers_sync(struct rvt_qp *qp)
2577 {
2578 timer_delete_sync(&qp->s_timer);
2579 hrtimer_cancel(&qp->s_rnr_timer);
2580 }
2581 EXPORT_SYMBOL(rvt_del_timers_sync);
2582
2583 /*
2584 * This is called from s_timer for missing responses.
2585 */
2586 static void rvt_rc_timeout(struct timer_list *t)
2587 {
2588 struct rvt_qp *qp = timer_container_of(qp, t, s_timer);
2589 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2590 unsigned long flags;
2591
2592 spin_lock_irqsave(&qp->r_lock, flags);
2593 spin_lock(&qp->s_lock);
2594 if (qp->s_flags & RVT_S_TIMER) {
2595 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
2596
2597 qp->s_flags &= ~RVT_S_TIMER;
2598 rvp->n_rc_timeouts++;
2599 timer_delete(&qp->s_timer);
2600 trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
2601 if (rdi->driver_f.notify_restart_rc)
2602 rdi->driver_f.notify_restart_rc(qp,
2603 qp->s_last_psn + 1,
2604 1);
2605 rdi->driver_f.schedule_send(qp);
2606 }
2607 spin_unlock(&qp->s_lock);
2608 spin_unlock_irqrestore(&qp->r_lock, flags);
2609 }
2610
2611 /*
2612 * This is called from s_timer for RNR timeouts.
2613 */
2614 enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
2615 {
2616 struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
2617 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2618 unsigned long flags;
2619
2620 spin_lock_irqsave(&qp->s_lock, flags);
2621 rvt_stop_rnr_timer(qp);
2622 trace_rvt_rnrnak_timeout(qp, 0);
2623 rdi->driver_f.schedule_send(qp);
2624 spin_unlock_irqrestore(&qp->s_lock, flags);
2625 return HRTIMER_NORESTART;
2626 }
2627 EXPORT_SYMBOL(rvt_rc_rnr_retry);
2628
2629 /**
2630 * rvt_qp_iter_init - initialize a QP iterator
2631 * @rdi: rvt devinfo
2632 * @v: u64 value
2633 * @cb: user-defined callback
2634 *
2635 * This returns an iterator suitable for iterating QPs
2636 * in the system.
2637 *
2638 * The @cb is a user-defined callback and @v is a 64-bit
2639 * value passed to and relevant for processing in the
2640 * @cb. An example use case would be to alter QP processing
2641 * based on criteria not part of the rvt_qp.
2642 *
2643 * Use cases that require memory allocation to succeed
2644 * must preallocate appropriately.
2645 *
2646 * Return: a pointer to an rvt_qp_iter or NULL
2647 */
2648 struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
2649 u64 v,
2650 void (*cb)(struct rvt_qp *qp, u64 v))
2651 {
2652 struct rvt_qp_iter *i;
2653
2654 i = kzalloc(sizeof(*i), GFP_KERNEL);
2655 if (!i)
2656 return NULL;
2657
2658 i->rdi = rdi;
2659 /* number of special QPs (SMI/GSI) for device */
2660 i->specials = rdi->ibdev.phys_port_cnt * 2;
2661 i->v = v;
2662 i->cb = cb;
2663
2664 return i;
2665 }
2666 EXPORT_SYMBOL(rvt_qp_iter_init);
2667
2668 /**
2669 * rvt_qp_iter_next - return the next QP in iter
2670 * @iter: the iterator
2671 *
2672 * Fine grained QP iterator suitable for use
2673 * with debugfs seq_file mechanisms.
2674 *
2675 * Updates iter->qp with the current QP when the return
2676 * value is 0.
2677 *
2678 * Return: 0 - iter->qp is valid, 1 - no more QPs
2679 */
2680 int rvt_qp_iter_next(struct rvt_qp_iter *iter)
2681 __must_hold(RCU)
2682 {
2683 int n = iter->n;
2684 int ret = 1;
2685 struct rvt_qp *pqp = iter->qp;
2686 struct rvt_qp *qp;
2687 struct rvt_dev_info *rdi = iter->rdi;
2688
2689 /*
2690 * The approach is to consider the special qps
2691 * as additional table entries before the
2692 * real hash table. Since the qp code sets
2693 * the qp->next hash link to NULL, this works just fine.
2694 *
2695 * iter->specials is 2 * # ports
2696 *
2697 * n = 0..iter->specials is the special qp indices
2698 *
2699 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
2700 * the potential hash bucket entries
2701 *
2702 */
2703 for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
2704 if (pqp) {
2705 qp = rcu_dereference(pqp->next);
2706 } else {
2707 if (n < iter->specials) {
2708 struct rvt_ibport *rvp;
2709 int pidx;
2710
2711 pidx = n % rdi->ibdev.phys_port_cnt;
2712 rvp = rdi->ports[pidx];
2713 qp = rcu_dereference(rvp->qp[n & 1]);
2714 } else {
2715 qp = rcu_dereference(
2716 rdi->qp_dev->qp_table[
2717 (n - iter->specials)]);
2718 }
2719 }
2720 pqp = qp;
2721 if (qp) {
2722 iter->qp = qp;
2723 iter->n = n;
2724 return 0;
2725 }
2726 }
2727 return ret;
2728 }
2729 EXPORT_SYMBOL(rvt_qp_iter_next);
2730
2731 /**
2732 * rvt_qp_iter - iterate all QPs
2733 * @rdi: rvt devinfo
2734 * @v: a 64-bit value
2735 * @cb: a callback
2736 *
2737 * This provides a way for iterating all QPs.
2738 *
2739 * The @cb is a user-defined callback and @v is a 64-bit
2740 * value passed to and relevant for processing in the
2741 * cb. An example use case would be to alter QP processing
2742 * based on criteria not part of the rvt_qp.
2743 *
2744 * The code has an internal iterator to simplify
2745 * non seq_file use cases.
2746 */
2747 void rvt_qp_iter(struct rvt_dev_info *rdi,
2748 u64 v,
2749 void (*cb)(struct rvt_qp *qp, u64 v))
2750 {
2751 int ret;
2752 struct rvt_qp_iter i = {
2753 .rdi = rdi,
2754 .specials = rdi->ibdev.phys_port_cnt * 2,
2755 .v = v,
2756 .cb = cb
2757 };
2758
2759 rcu_read_lock();
2760 do {
2761 ret = rvt_qp_iter_next(&i);
2762 if (!ret) {
2763 rvt_get_qp(i.qp);
2764 rcu_read_unlock();
2765 i.cb(i.qp, i.v);
2766 rcu_read_lock();
2767 rvt_put_qp(i.qp);
2768 }
2769 } while (!ret);
2770 rcu_read_unlock();
2771 }
2772 EXPORT_SYMBOL(rvt_qp_iter);
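/*
 * Illustrative sketch (compiled out): how a driver or debug path might
 * walk every QP with rvt_qp_iter().  The callback and the use of @v as
 * a flag value are assumptions made for the example only.
 */
#if 0
static void example_qp_cb(struct rvt_qp *qp, u64 v)
{
	/* v is the opaque value handed to rvt_qp_iter() */
	pr_info("qp %u in state %u (flag %llu)\n",
		qp->ibqp.qp_num, qp->state, v);
}

static void example_dump_all_qps(struct rvt_dev_info *rdi)
{
	rvt_qp_iter(rdi, 1, example_qp_cb);
}
#endif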
2773
2774 /*
2775 * This should be called with s_lock and r_lock held.
2776 */
2777 void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
2778 enum ib_wc_status status)
2779 {
2780 u32 old_last, last;
2781 struct rvt_dev_info *rdi;
2782
2783 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2784 return;
2785 rdi = ib_to_rvt(qp->ibqp.device);
2786
2787 old_last = qp->s_last;
2788 trace_rvt_qp_send_completion(qp, wqe, old_last);
2789 last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
2790 status);
2791 if (qp->s_acked == old_last)
2792 qp->s_acked = last;
2793 if (qp->s_cur == old_last)
2794 qp->s_cur = last;
2795 if (qp->s_tail == old_last)
2796 qp->s_tail = last;
2797 if (qp->state == IB_QPS_SQD && last == qp->s_cur)
2798 qp->s_draining = 0;
2799 }
2800 EXPORT_SYMBOL(rvt_send_complete);
2801
2802 /**
2803 * rvt_copy_sge - copy data to SGE memory
2804 * @qp: associated QP
2805 * @ss: the SGE state
2806 * @data: the data to copy
2807 * @length: the length of the data
2808 * @release: boolean to release MR
2809 * @copy_last: do a separate copy of the last 8 bytes
2810 */
2811 void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
2812 void *data, u32 length,
2813 bool release, bool copy_last)
2814 {
2815 struct rvt_sge *sge = &ss->sge;
2816 int i;
2817 bool in_last = false;
2818 bool cacheless_copy = false;
2819 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2820 struct rvt_wss *wss = rdi->wss;
2821 unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
2822
2823 if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
2824 cacheless_copy = length >= PAGE_SIZE;
2825 } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
2826 if (length >= PAGE_SIZE) {
2827 /*
2828 * NOTE: this *assumes*:
2829 * o The first vaddr is the dest.
2830 * o If multiple pages, then vaddr is sequential.
2831 */
2832 wss_insert(wss, sge->vaddr);
2833 if (length >= (2 * PAGE_SIZE))
2834 wss_insert(wss, (sge->vaddr + PAGE_SIZE));
2835
2836 cacheless_copy = wss_exceeds_threshold(wss);
2837 } else {
2838 wss_advance_clean_counter(wss);
2839 }
2840 }
2841
2842 if (copy_last) {
2843 if (length > 8) {
2844 length -= 8;
2845 } else {
2846 copy_last = false;
2847 in_last = true;
2848 }
2849 }
2850
2851 again:
2852 while (length) {
2853 u32 len = rvt_get_sge_length(sge, length);
2854
2855 WARN_ON_ONCE(len == 0);
2856 if (unlikely(in_last)) {
2857 /* enforce byte transfer ordering */
2858 for (i = 0; i < len; i++)
2859 ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
2860 } else if (cacheless_copy) {
2861 cacheless_memcpy(sge->vaddr, data, len);
2862 } else {
2863 memcpy(sge->vaddr, data, len);
2864 }
2865 rvt_update_sge(ss, len, release);
2866 data += len;
2867 length -= len;
2868 }
2869
2870 if (copy_last) {
2871 copy_last = false;
2872 in_last = true;
2873 length = 8;
2874 goto again;
2875 }
2876 }
2877 EXPORT_SYMBOL(rvt_copy_sge);
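/*
 * Illustrative sketch (compiled out): the copy strategy above is
 * selected per device through dparms.sge_copy_mode, typically at
 * driver init time.  The plain-memcpy default named here is an
 * assumption for the example; drivers set whichever mode they support.
 */
#if 0
static void example_select_copy_mode(struct rvt_dev_info *rdi, bool adaptive)
{
	rdi->dparms.sge_copy_mode = adaptive ? RVT_SGE_COPY_ADAPTIVE :
					       RVT_SGE_COPY_MEMCPY;
}
#endif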
2878
2879 static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
2880 struct rvt_qp *sqp)
2881 {
2882 rvp->n_pkt_drops++;
2883 /*
2884 * For RC, the requester would time out and retry, so
2885 * shortcut the timeouts and just signal too many retries.
2886 */
2887 return sqp->ibqp.qp_type == IB_QPT_RC ?
2888 IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
2889 }
2890
2891 /**
2892 * rvt_ruc_loopback - handle UC and RC loopback requests
2893 * @sqp: the sending QP
2894 *
2895 * This is called from rvt_do_send() to forward a WQE addressed to the same HFI
2896 * Note that although we are single threaded due to the send engine, we still
2897 * have to protect against post_send(). We don't have to worry about
2898 * receive interrupts since this is a connected protocol and all packets
2899 * will pass through here.
2900 */
2901 void rvt_ruc_loopback(struct rvt_qp *sqp)
2902 {
2903 struct rvt_ibport *rvp = NULL;
2904 struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
2905 struct rvt_qp *qp;
2906 struct rvt_swqe *wqe;
2907 struct rvt_sge *sge;
2908 unsigned long flags;
2909 struct ib_wc wc;
2910 u64 sdata;
2911 atomic64_t *maddr;
2912 enum ib_wc_status send_status;
2913 bool release;
2914 int ret;
2915 bool copy_last = false;
2916 int local_ops = 0;
2917
2918 rcu_read_lock();
2919 rvp = rdi->ports[sqp->port_num - 1];
2920
2921 /*
2922 * Note that we check the responder QP state after
2923 * checking the requester's state.
2924 */
2925
2926 qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
2927 sqp->remote_qpn);
2928
2929 spin_lock_irqsave(&sqp->s_lock, flags);
2930
2931 /* Return if we are already busy processing a work request. */
2932 if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
2933 !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2934 goto unlock;
2935
2936 sqp->s_flags |= RVT_S_BUSY;
2937
2938 again:
2939 if (sqp->s_last == READ_ONCE(sqp->s_head))
2940 goto clr_busy;
2941 wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
2942
2943 /* Return if it is not OK to start a new work request. */
2944 if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
2945 if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
2946 goto clr_busy;
2947 /* We are in the error state, flush the work request. */
2948 send_status = IB_WC_WR_FLUSH_ERR;
2949 goto flush_send;
2950 }
2951
2952 /*
2953 * We can rely on the entry not changing without the s_lock
2954 * being held until we update s_last.
2955 * We increment s_cur to indicate s_last is in progress.
2956 */
2957 if (sqp->s_last == sqp->s_cur) {
2958 if (++sqp->s_cur >= sqp->s_size)
2959 sqp->s_cur = 0;
2960 }
2961 spin_unlock_irqrestore(&sqp->s_lock, flags);
2962
2963 if (!qp) {
2964 send_status = loopback_qp_drop(rvp, sqp);
2965 goto serr_no_r_lock;
2966 }
2967 spin_lock_irqsave(&qp->r_lock, flags);
2968 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
2969 qp->ibqp.qp_type != sqp->ibqp.qp_type) {
2970 send_status = loopback_qp_drop(rvp, sqp);
2971 goto serr;
2972 }
2973
2974 memset(&wc, 0, sizeof(wc));
2975 send_status = IB_WC_SUCCESS;
2976
2977 release = true;
2978 sqp->s_sge.sge = wqe->sg_list[0];
2979 sqp->s_sge.sg_list = wqe->sg_list + 1;
2980 sqp->s_sge.num_sge = wqe->wr.num_sge;
2981 sqp->s_len = wqe->length;
2982 switch (wqe->wr.opcode) {
2983 case IB_WR_REG_MR:
2984 goto send_comp;
2985
2986 case IB_WR_LOCAL_INV:
2987 if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
2988 if (rvt_invalidate_rkey(sqp,
2989 wqe->wr.ex.invalidate_rkey))
2990 send_status = IB_WC_LOC_PROT_ERR;
2991 local_ops = 1;
2992 }
2993 goto send_comp;
2994
2995 case IB_WR_SEND_WITH_INV:
2996 case IB_WR_SEND_WITH_IMM:
2997 case IB_WR_SEND:
2998 ret = rvt_get_rwqe(qp, false);
2999 if (ret < 0)
3000 goto op_err;
3001 if (!ret)
3002 goto rnr_nak;
3003 if (wqe->length > qp->r_len)
3004 goto inv_err;
3005 switch (wqe->wr.opcode) {
3006 case IB_WR_SEND_WITH_INV:
3007 if (!rvt_invalidate_rkey(qp,
3008 wqe->wr.ex.invalidate_rkey)) {
3009 wc.wc_flags = IB_WC_WITH_INVALIDATE;
3010 wc.ex.invalidate_rkey =
3011 wqe->wr.ex.invalidate_rkey;
3012 }
3013 break;
3014 case IB_WR_SEND_WITH_IMM:
3015 wc.wc_flags = IB_WC_WITH_IMM;
3016 wc.ex.imm_data = wqe->wr.ex.imm_data;
3017 break;
3018 default:
3019 break;
3020 }
3021 break;
3022
3023 case IB_WR_RDMA_WRITE_WITH_IMM:
3024 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3025 goto inv_err;
3026 wc.wc_flags = IB_WC_WITH_IMM;
3027 wc.ex.imm_data = wqe->wr.ex.imm_data;
3028 ret = rvt_get_rwqe(qp, true);
3029 if (ret < 0)
3030 goto op_err;
3031 if (!ret)
3032 goto rnr_nak;
3033 /* skip copy_last set and qp_access_flags recheck */
3034 goto do_write;
3035 case IB_WR_RDMA_WRITE:
3036 copy_last = rvt_is_user_qp(qp);
3037 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3038 goto inv_err;
3039 do_write:
3040 if (wqe->length == 0)
3041 break;
3042 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
3043 wqe->rdma_wr.remote_addr,
3044 wqe->rdma_wr.rkey,
3045 IB_ACCESS_REMOTE_WRITE)))
3046 goto acc_err;
3047 qp->r_sge.sg_list = NULL;
3048 qp->r_sge.num_sge = 1;
3049 qp->r_sge.total_len = wqe->length;
3050 break;
3051
3052 case IB_WR_RDMA_READ:
3053 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
3054 goto inv_err;
3055 if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
3056 wqe->rdma_wr.remote_addr,
3057 wqe->rdma_wr.rkey,
3058 IB_ACCESS_REMOTE_READ)))
3059 goto acc_err;
3060 release = false;
3061 sqp->s_sge.sg_list = NULL;
3062 sqp->s_sge.num_sge = 1;
3063 qp->r_sge.sge = wqe->sg_list[0];
3064 qp->r_sge.sg_list = wqe->sg_list + 1;
3065 qp->r_sge.num_sge = wqe->wr.num_sge;
3066 qp->r_sge.total_len = wqe->length;
3067 break;
3068
3069 case IB_WR_ATOMIC_CMP_AND_SWP:
3070 case IB_WR_ATOMIC_FETCH_AND_ADD:
3071 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
3072 goto inv_err;
3073 if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1)))
3074 goto inv_err;
3075 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
3076 wqe->atomic_wr.remote_addr,
3077 wqe->atomic_wr.rkey,
3078 IB_ACCESS_REMOTE_ATOMIC)))
3079 goto acc_err;
3080 /* Perform atomic OP and save result. */
3081 maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
3082 sdata = wqe->atomic_wr.compare_add;
3083 *(u64 *)sqp->s_sge.sge.vaddr =
3084 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
3085 (u64)atomic64_add_return(sdata, maddr) - sdata :
3086 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
3087 sdata, wqe->atomic_wr.swap);
3088 rvt_put_mr(qp->r_sge.sge.mr);
3089 qp->r_sge.num_sge = 0;
3090 goto send_comp;
3091
3092 default:
3093 send_status = IB_WC_LOC_QP_OP_ERR;
3094 goto serr;
3095 }
3096
3097 sge = &sqp->s_sge.sge;
3098 while (sqp->s_len) {
3099 u32 len = rvt_get_sge_length(sge, sqp->s_len);
3100
3101 WARN_ON_ONCE(len == 0);
3102 rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
3103 len, release, copy_last);
3104 rvt_update_sge(&sqp->s_sge, len, !release);
3105 sqp->s_len -= len;
3106 }
3107 if (release)
3108 rvt_put_ss(&qp->r_sge);
3109
3110 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
3111 goto send_comp;
3112
3113 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
3114 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
3115 else
3116 wc.opcode = IB_WC_RECV;
3117 wc.wr_id = qp->r_wr_id;
3118 wc.status = IB_WC_SUCCESS;
3119 wc.byte_len = wqe->length;
3120 wc.qp = &qp->ibqp;
3121 wc.src_qp = qp->remote_qpn;
3122 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
3123 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
3124 wc.port_num = 1;
3125 /* Signal completion event if the solicited bit is set. */
3126 rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);
3127
3128 send_comp:
3129 spin_unlock_irqrestore(&qp->r_lock, flags);
3130 spin_lock_irqsave(&sqp->s_lock, flags);
3131 rvp->n_loop_pkts++;
3132 flush_send:
3133 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
3134 spin_lock(&sqp->r_lock);
3135 rvt_send_complete(sqp, wqe, send_status);
3136 spin_unlock(&sqp->r_lock);
3137 if (local_ops) {
3138 atomic_dec(&sqp->local_ops_pending);
3139 local_ops = 0;
3140 }
3141 goto again;
3142
3143 rnr_nak:
3144 /* Handle RNR NAK */
3145 if (qp->ibqp.qp_type == IB_QPT_UC)
3146 goto send_comp;
3147 rvp->n_rnr_naks++;
3148 /*
3149 * Note: we don't need the s_lock held since the BUSY flag
3150 * makes this single threaded.
3151 */
3152 if (sqp->s_rnr_retry == 0) {
3153 send_status = IB_WC_RNR_RETRY_EXC_ERR;
3154 goto serr;
3155 }
3156 if (sqp->s_rnr_retry_cnt < 7)
3157 sqp->s_rnr_retry--;
3158 spin_unlock_irqrestore(&qp->r_lock, flags);
3159 spin_lock_irqsave(&sqp->s_lock, flags);
3160 if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
3161 goto clr_busy;
3162 rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
3163 IB_AETH_CREDIT_SHIFT);
3164 goto clr_busy;
3165
3166 op_err:
3167 send_status = IB_WC_REM_OP_ERR;
3168 wc.status = IB_WC_LOC_QP_OP_ERR;
3169 goto err;
3170
3171 inv_err:
3172 send_status =
3173 sqp->ibqp.qp_type == IB_QPT_RC ?
3174 IB_WC_REM_INV_REQ_ERR :
3175 IB_WC_SUCCESS;
3176 wc.status = IB_WC_LOC_QP_OP_ERR;
3177 goto err;
3178
3179 acc_err:
3180 send_status = IB_WC_REM_ACCESS_ERR;
3181 wc.status = IB_WC_LOC_PROT_ERR;
3182 err:
3183 /* responder goes to error state */
3184 rvt_rc_error(qp, wc.status);
3185
3186 serr:
3187 spin_unlock_irqrestore(&qp->r_lock, flags);
3188 serr_no_r_lock:
3189 spin_lock_irqsave(&sqp->s_lock, flags);
3190 spin_lock(&sqp->r_lock);
3191 rvt_send_complete(sqp, wqe, send_status);
3192 spin_unlock(&sqp->r_lock);
3193 if (sqp->ibqp.qp_type == IB_QPT_RC) {
3194 int lastwqe;
3195
3196 spin_lock(&sqp->r_lock);
3197 lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
3198 spin_unlock(&sqp->r_lock);
3199
3200 sqp->s_flags &= ~RVT_S_BUSY;
3201 spin_unlock_irqrestore(&sqp->s_lock, flags);
3202 if (lastwqe) {
3203 struct ib_event ev;
3204
3205 ev.device = sqp->ibqp.device;
3206 ev.element.qp = &sqp->ibqp;
3207 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
3208 sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
3209 }
3210 goto done;
3211 }
3212 clr_busy:
3213 sqp->s_flags &= ~RVT_S_BUSY;
3214 unlock:
3215 spin_unlock_irqrestore(&sqp->s_lock, flags);
3216 done:
3217 rcu_read_unlock();
3218 }
3219 EXPORT_SYMBOL(rvt_ruc_loopback);
3220