// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"

/**
 * irdma_arp_table - manage arp table
 * @rf: RDMA PCI function
 * @ip_addr: ip address for device
 * @ipv4: IPv4 flag
 * @mac_addr: mac address ptr
 * @action: modify, delete or add
 */
int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
		    const u8 *mac_addr, u32 action)
{
	unsigned long flags;
	int arp_index;
	u32 ip[4] = {};

	if (ipv4)
		ip[0] = *ip_addr;
	else
		memcpy(ip, ip_addr, sizeof(ip));

	spin_lock_irqsave(&rf->arp_lock, flags);
	for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) {
		if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)))
			break;
	}

	switch (action) {
	case IRDMA_ARP_ADD:
		if (arp_index != rf->arp_table_size) {
			arp_index = -1;
			break;
		}

		arp_index = 0;
		if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size,
				     (u32 *)&arp_index, &rf->next_arp_index)) {
			arp_index = -1;
			break;
		}

		memcpy(rf->arp_table[arp_index].ip_addr, ip,
		       sizeof(rf->arp_table[arp_index].ip_addr));
		ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr);
		break;
	case IRDMA_ARP_RESOLVE:
		if (arp_index == rf->arp_table_size)
			arp_index = -1;
		break;
	case IRDMA_ARP_DELETE:
		if (arp_index == rf->arp_table_size) {
			arp_index = -1;
			break;
		}

		memset(rf->arp_table[arp_index].ip_addr, 0,
		       sizeof(rf->arp_table[arp_index].ip_addr));
		eth_zero_addr(rf->arp_table[arp_index].mac_addr);
		irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
		break;
	default:
		arp_index = -1;
		break;
	}

	spin_unlock_irqrestore(&rf->arp_lock, flags);
	return arp_index;
}
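
/*
 * Usage sketch (hypothetical caller, not part of this driver): resolve the
 * cached entry for a peer IPv4 address and add one if the lookup misses.
 * "rf", "peer_ip" (host byte order) and "peer_mac" are assumed to be in
 * scope; a negative return means no match or no free slot.
 *
 *	int idx = irdma_arp_table(rf, &peer_ip, true, NULL, IRDMA_ARP_RESOLVE);
 *
 *	if (idx < 0)
 *		idx = irdma_arp_table(rf, &peer_ip, true, peer_mac,
 *				      IRDMA_ARP_ADD);
 */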

/**
 * irdma_add_arp - add a new arp entry if needed
 * @rf: RDMA function
 * @ip: IP address
 * @ipv4: IPv4 flag
 * @mac: MAC address
 */
int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac)
{
	int arpidx;

	arpidx = irdma_arp_table(rf, &ip[0], ipv4, NULL, IRDMA_ARP_RESOLVE);
	if (arpidx >= 0) {
		if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac))
			return arpidx;

		irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip,
				       ipv4, IRDMA_ARP_DELETE);
	}

	irdma_manage_arp_cache(rf, mac, ip, ipv4, IRDMA_ARP_ADD);

	return irdma_arp_table(rf, ip, ipv4, NULL, IRDMA_ARP_RESOLVE);
}

/**
 * wr32 - write 32 bits to hw register
 * @hw: hardware information including registers
 * @reg: register offset
 * @val: value to write to register
 */
inline void wr32(struct irdma_hw *hw, u32 reg, u32 val)
{
	writel(val, hw->hw_addr + reg);
}

/**
 * rd32 - read a 32 bit hw register
 * @hw: hardware information including registers
 * @reg: register offset
 *
 * Return value of register content
 */
inline u32 rd32(struct irdma_hw *hw, u32 reg)
{
	return readl(hw->hw_addr + reg);
}

/**
 * rd64 - read a 64 bit hw register
 * @hw: hardware information including registers
 * @reg: register offset
 *
 * Return value of register content
 */
inline u64 rd64(struct irdma_hw *hw, u32 reg)
{
	return readq(hw->hw_addr + reg);
}
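
/*
 * Example (illustrative only; the register names below are hypothetical):
 * the accessors above take a byte offset from the mapped BAR, so a doorbell
 * write followed by a status poll looks like:
 *
 *	wr32(dev->hw, EXAMPLE_DB_OFFSET, qp_id);
 *	while (!(rd32(dev->hw, EXAMPLE_STATUS_OFFSET) & EXAMPLE_READY_BIT))
 *		cpu_relax();
 */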

static void irdma_gid_change_event(struct ib_device *ibdev)
{
	struct ib_event ib_event;

	ib_event.event = IB_EVENT_GID_CHANGE;
	ib_event.device = ibdev;
	ib_event.element.port_num = 1;
	ib_dispatch_event(&ib_event);
}

/**
 * irdma_inetaddr_event - system notifier for ipv4 addr events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: interface address
 */
int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
			 void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *real_dev, *netdev = ifa->ifa_dev->dev;
	struct irdma_device *iwdev;
	struct ib_device *ibdev;
	u32 local_ipaddr;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
	if (!ibdev)
		return NOTIFY_DONE;

	iwdev = to_iwdev(ibdev);
	local_ipaddr = ntohl(ifa->ifa_address);
	ibdev_dbg(&iwdev->ibdev,
		  "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", real_dev,
		  event, &local_ipaddr, real_dev->dev_addr);
	switch (event) {
	case NETDEV_DOWN:
		irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
				       &local_ipaddr, true, IRDMA_ARP_DELETE);
		irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, false);
		irdma_gid_change_event(&iwdev->ibdev);
		break;
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:
		irdma_add_arp(iwdev->rf, &local_ipaddr, true, real_dev->dev_addr);
		irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, true);
		irdma_gid_change_event(&iwdev->ibdev);
		break;
	default:
		break;
	}

	ib_device_put(ibdev);

	return NOTIFY_DONE;
}
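
/*
 * Registration sketch (assumed init-path code, not shown in this file): the
 * handler above only runs once it is hooked into the inetaddr notifier
 * chain via a notifier_block naming it as the callback:
 *
 *	static struct notifier_block irdma_inetaddr_nb = {
 *		.notifier_call = irdma_inetaddr_event,
 *	};
 *
 *	register_inetaddr_notifier(&irdma_inetaddr_nb);
 */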

/**
 * irdma_inet6addr_event - system notifier for ipv6 addr events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: interface address
 */
int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
			  void *ptr)
{
	struct inet6_ifaddr *ifa = ptr;
	struct net_device *real_dev, *netdev = ifa->idev->dev;
	struct irdma_device *iwdev;
	struct ib_device *ibdev;
	u32 local_ipaddr6[4];

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
	if (!ibdev)
		return NOTIFY_DONE;

	iwdev = to_iwdev(ibdev);
	irdma_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
	ibdev_dbg(&iwdev->ibdev,
		  "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", real_dev,
		  event, local_ipaddr6, real_dev->dev_addr);
	switch (event) {
	case NETDEV_DOWN:
		irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
				       local_ipaddr6, false, IRDMA_ARP_DELETE);
		irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, false);
		irdma_gid_change_event(&iwdev->ibdev);
		break;
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:
		irdma_add_arp(iwdev->rf, local_ipaddr6, false,
			      real_dev->dev_addr);
		irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, true);
		irdma_gid_change_event(&iwdev->ibdev);
		break;
	default:
		break;
	}

	ib_device_put(ibdev);

	return NOTIFY_DONE;
}

/**
 * irdma_net_event - system notifier for net events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: neighbor
 */
int irdma_net_event(struct notifier_block *notifier, unsigned long event,
		    void *ptr)
{
	struct neighbour *neigh = ptr;
	struct net_device *real_dev, *netdev;
	struct irdma_device *iwdev;
	struct ib_device *ibdev;
	__be32 *p;
	u32 local_ipaddr[4] = {};
	bool ipv4 = true;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		netdev = neigh->dev;
		real_dev = rdma_vlan_dev_real_dev(netdev);
		if (!real_dev)
			real_dev = netdev;
		ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
		if (!ibdev)
			return NOTIFY_DONE;

		iwdev = to_iwdev(ibdev);
		p = (__be32 *)neigh->primary_key;
		if (neigh->tbl->family == AF_INET6) {
			ipv4 = false;
			irdma_copy_ip_ntohl(local_ipaddr, p);
		} else {
			local_ipaddr[0] = ntohl(*p);
		}

		ibdev_dbg(&iwdev->ibdev,
			  "DEV: netdev %p state %d local_ip=%pI4 MAC=%pM\n",
			  iwdev->netdev, neigh->nud_state, local_ipaddr,
			  neigh->ha);

		if (neigh->nud_state & NUD_VALID)
			irdma_add_arp(iwdev->rf, local_ipaddr, ipv4, neigh->ha);
		else
			irdma_manage_arp_cache(iwdev->rf, neigh->ha,
					       local_ipaddr, ipv4,
					       IRDMA_ARP_DELETE);
		ib_device_put(ibdev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

/**
 * irdma_netdevice_event - system notifier for netdev events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: netdev
 */
int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
			  void *ptr)
{
	struct irdma_device *iwdev;
	struct ib_device *ibdev;
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
	if (!ibdev)
		return NOTIFY_DONE;

	iwdev = to_iwdev(ibdev);
	iwdev->iw_status = 1;
	switch (event) {
	case NETDEV_DOWN:
		iwdev->iw_status = 0;
		fallthrough;
	default:
		break;
	}
	ib_device_put(ibdev);

	return NOTIFY_DONE;
}

/**
 * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: irdma device
 */
static void irdma_add_ipv6_addr(struct irdma_device *iwdev)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	u32 local_ipaddr6[4];

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		if (((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF &&
		      rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev) ||
		     ip_dev == iwdev->netdev) &&
		    (READ_ONCE(ip_dev->flags) & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				ibdev_err(&iwdev->ibdev, "ipv6 inet device not found\n");
				break;
			}
			list_for_each_entry_safe(ifp, tmp, &idev->addr_list,
						 if_list) {
				ibdev_dbg(&iwdev->ibdev,
					  "INIT: IP=%pI6, vlan_id=%d, MAC=%pM\n",
					  &ifp->addr,
					  rdma_vlan_dev_vlan_id(ip_dev),
					  ip_dev->dev_addr);

				irdma_copy_ip_ntohl(local_ipaddr6,
						    ifp->addr.in6_u.u6_addr32);
				irdma_manage_arp_cache(iwdev->rf,
						       ip_dev->dev_addr,
						       local_ipaddr6, false,
						       IRDMA_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}

/**
 * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: irdma device
 */
static void irdma_add_ipv4_addr(struct irdma_device *iwdev)
{
	struct net_device *dev;
	struct in_device *idev;
	u32 ip_addr;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (((rdma_vlan_dev_vlan_id(dev) < 0xFFFF &&
		      rdma_vlan_dev_real_dev(dev) == iwdev->netdev) ||
		     dev == iwdev->netdev) && (READ_ONCE(dev->flags) & IFF_UP)) {
			const struct in_ifaddr *ifa;

			idev = __in_dev_get_rcu(dev);
			if (!idev)
				continue;

			in_dev_for_each_ifa_rcu(ifa, idev) {
				ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI4, vlan_id=%d, MAC=%pM\n",
					  &ifa->ifa_address, rdma_vlan_dev_vlan_id(dev),
					  dev->dev_addr);

				ip_addr = ntohl(ifa->ifa_address);
				irdma_manage_arp_cache(iwdev->rf, dev->dev_addr,
						       &ip_addr, true,
						       IRDMA_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}

/**
 * irdma_add_ip - add ip addresses
 * @iwdev: irdma device
 *
 * Add ipv4/ipv6 addresses to the arp cache
 */
void irdma_add_ip(struct irdma_device *iwdev)
{
	irdma_add_ipv4_addr(iwdev);
	irdma_add_ipv6_addr(iwdev);
}

/**
 * irdma_alloc_and_get_cqp_request - get cqp struct
 * @cqp: device cqp ptr
 * @wait: cqp to be used in wait mode
 */
struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
							  bool wait)
{
	struct irdma_cqp_request *cqp_request = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cqp->req_lock, flags);
	if (!list_empty(&cqp->cqp_avail_reqs)) {
		cqp_request = list_first_entry(&cqp->cqp_avail_reqs,
					       struct irdma_cqp_request, list);
		list_del_init(&cqp_request->list);
	}
	spin_unlock_irqrestore(&cqp->req_lock, flags);
	if (!cqp_request) {
		cqp_request = kzalloc_obj(*cqp_request, GFP_ATOMIC);
		if (cqp_request) {
			cqp_request->dynamic = true;
			if (wait)
				init_waitqueue_head(&cqp_request->waitq);
		}
	}
	if (!cqp_request) {
		ibdev_dbg(to_ibdev(cqp->sc_cqp.dev), "ERR: CQP Request Fail: No Memory");
		return NULL;
	}

	cqp_request->waiting = wait;
	refcount_set(&cqp_request->refcnt, 1);
	memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
	memset(&cqp_request->info, 0, sizeof(cqp_request->info));

	return cqp_request;
}
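
/*
 * Lifecycle sketch (assumed caller pattern, mirroring the command helpers
 * later in this file): allocate a request, fill in its cqp_info, post it,
 * then drop the allocation reference regardless of status.
 *
 *	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 *	if (!cqp_request)
 *		return -ENOMEM;
 *
 *	cqp_info = &cqp_request->info;
 *	cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;	(plus op-specific fields)
 *	status = irdma_handle_cqp_op(rf, cqp_request);
 *	irdma_put_cqp_request(&rf->cqp, cqp_request);
 */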

/**
 * irdma_get_cqp_request - increase refcount for cqp_request
 * @cqp_request: pointer to cqp_request instance
 */
static inline void irdma_get_cqp_request(struct irdma_cqp_request *cqp_request)
{
	refcount_inc(&cqp_request->refcnt);
}

/**
 * irdma_free_cqp_request - free cqp request
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void irdma_free_cqp_request(struct irdma_cqp *cqp,
			    struct irdma_cqp_request *cqp_request)
{
	unsigned long flags;

	if (cqp_request->dynamic) {
		kfree(cqp_request);
	} else {
		WRITE_ONCE(cqp_request->request_done, false);
		cqp_request->callback_fcn = NULL;
		cqp_request->waiting = false;
		cqp_request->pending = false;

		spin_lock_irqsave(&cqp->req_lock, flags);
		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
		spin_unlock_irqrestore(&cqp->req_lock, flags);
	}
	wake_up(&cqp->remove_wq);
}

/**
 * irdma_put_cqp_request - dec ref count and free if 0
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void irdma_put_cqp_request(struct irdma_cqp *cqp,
			   struct irdma_cqp_request *cqp_request)
{
	if (refcount_dec_and_test(&cqp_request->refcnt))
		irdma_free_cqp_request(cqp, cqp_request);
}

/**
 * irdma_free_pending_cqp_request - free pending cqp request objs
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
static void
irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
			       struct irdma_cqp_request *cqp_request)
{
	if (cqp_request->waiting) {
		cqp_request->compl_info.error = true;
		WRITE_ONCE(cqp_request->request_done, true);
		wake_up(&cqp_request->waitq);
	}
	wait_event_timeout(cqp->remove_wq,
			   refcount_read(&cqp_request->refcnt) == 1, 1000);
	irdma_put_cqp_request(cqp, cqp_request);
}

/**
 * irdma_cleanup_deferred_cqp_ops - clean-up cqp with no completions
 * @dev: sc_dev
 * @cqp: cqp
 */
static void irdma_cleanup_deferred_cqp_ops(struct irdma_sc_dev *dev,
					   struct irdma_cqp *cqp)
{
	u64 scratch;

	/* process all CQP requests with deferred/pending completions */
	while ((scratch = irdma_sc_cqp_cleanup_handler(dev)))
		irdma_free_pending_cqp_request(cqp, (struct irdma_cqp_request *)
						    (uintptr_t)scratch);
}

/**
 * irdma_cleanup_pending_cqp_op - clean-up cqp with no
 * completions
 * @rf: RDMA PCI function
 */
void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_cqp *cqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request = NULL;
	struct cqp_cmds_info *pcmdinfo = NULL;
	u32 i, pending_work, wqe_idx;

	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
		irdma_cleanup_deferred_cqp_ops(dev, cqp);
	pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
	wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
	for (i = 0; i < pending_work; i++) {
		cqp_request = (struct irdma_cqp_request *)(unsigned long)
				      cqp->scratch_array[wqe_idx];
		if (cqp_request)
			irdma_free_pending_cqp_request(cqp, cqp_request);
		wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring);
	}

	while (!list_empty(&dev->cqp_cmd_head)) {
		pcmdinfo = irdma_remove_cqp_head(dev);
		cqp_request =
			container_of(pcmdinfo, struct irdma_cqp_request, info);
		if (cqp_request)
			irdma_free_pending_cqp_request(cqp, cqp_request);
	}
}

int irdma_get_timeout_threshold(struct irdma_sc_dev *dev)
{
	u16 time_s = dev->vc_caps.cqp_timeout_s;

	if (!time_s)
		return CQP_TIMEOUT_THRESHOLD;

	return time_s * 1000 / dev->hw_attrs.max_cqp_compl_wait_time_ms;
}

static int irdma_get_def_timeout_threshold(struct irdma_sc_dev *dev)
{
	u16 time_s = dev->vc_caps.cqp_def_timeout_s;

	if (!time_s)
		return CQP_DEF_CMPL_TIMEOUT_THRESHOLD;

	return time_s * 1000 / dev->hw_attrs.max_cqp_compl_wait_time_ms;
}
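
/*
 * Worked example (numbers are illustrative, not taken from hardware): the
 * helpers above convert a timeout in seconds into a number of poll
 * iterations. With cqp_timeout_s = 60 and max_cqp_compl_wait_time_ms = 100,
 * the threshold is 60 * 1000 / 100 = 600 passes through the wait loop in
 * irdma_wait_event() below, each bounded by CQP_COMPL_WAIT_TIME_MS.
 */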

/**
 * irdma_wait_event - wait for completion
 * @rf: RDMA PCI function
 * @cqp_request: cqp request to wait
 */
static int irdma_wait_event(struct irdma_pci_f *rf,
			    struct irdma_cqp_request *cqp_request)
{
	struct irdma_cqp_timeout cqp_timeout = {};
	int timeout_threshold = irdma_get_timeout_threshold(&rf->sc_dev);
	bool cqp_error = false;
	int err_code = 0;

	cqp_timeout.compl_cqp_cmds = atomic64_read(&rf->sc_dev.cqp->completed_ops);
	do {
		irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
		if (wait_event_timeout(cqp_request->waitq,
				       READ_ONCE(cqp_request->request_done),
				       msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
			break;

		if (cqp_request->pending)
			/* There was a deferred or pending completion
			 * received for this CQP request, so we need
			 * to wait longer than usual.
			 */
			timeout_threshold =
				irdma_get_def_timeout_threshold(&rf->sc_dev);

		irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);

		if (cqp_timeout.count < timeout_threshold)
			continue;

		if (!rf->reset) {
			rf->reset = true;
			rf->gen_ops.request_reset(rf);
		}
		return -ETIMEDOUT;
	} while (1);

	cqp_error = cqp_request->compl_info.error;
	if (cqp_error) {
		err_code = -EIO;
		if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
			if (cqp_request->compl_info.min_err_code == 0x8002) {
				err_code = -EBUSY;
			} else if (cqp_request->compl_info.min_err_code == 0x8029) {
				if (!rf->reset) {
					rf->reset = true;
					rf->gen_ops.request_reset(rf);
				}
			}
		}
	}

	return err_code;
}

static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
	[IRDMA_OP_CEQ_DESTROY] = "Destroy CEQ Cmd",
	[IRDMA_OP_AEQ_DESTROY] = "Destroy AEQ Cmd",
	[IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd",
	[IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd",
	[IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd",
	[IRDMA_OP_AEQ_CREATE] = "AEQ Create Cmd",
	[IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd",
	[IRDMA_OP_QP_MODIFY] = "Modify QP Cmd",
	[IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd",
	[IRDMA_OP_CQ_CREATE] = "Create CQ Cmd",
	[IRDMA_OP_CQ_DESTROY] = "Destroy CQ Cmd",
	[IRDMA_OP_QP_CREATE] = "Create QP Cmd",
	[IRDMA_OP_QP_DESTROY] = "Destroy QP Cmd",
	[IRDMA_OP_ALLOC_STAG] = "Allocate STag Cmd",
	[IRDMA_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd",
	[IRDMA_OP_DEALLOC_STAG] = "Deallocate STag Cmd",
	[IRDMA_OP_MW_ALLOC] = "Allocate Memory Window Cmd",
	[IRDMA_OP_QP_FLUSH_WQES] = "Flush QP Cmd",
	[IRDMA_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd",
	[IRDMA_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd",
	[IRDMA_OP_UPDATE_PE_SDS] = "Update PE SDs Cmd",
	[IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
	[IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
	[IRDMA_OP_RESUME] = "Resume QP Cmd",
	[IRDMA_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd",
	[IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
	[IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
	[IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
	[IRDMA_OP_AH_MODIFY] = "Modify Address Handle Cmd",
	[IRDMA_OP_AH_DESTROY] = "Destroy Address Handle Cmd",
	[IRDMA_OP_MC_CREATE] = "Create Multicast Group Cmd",
	[IRDMA_OP_MC_DESTROY] = "Destroy Multicast Group Cmd",
	[IRDMA_OP_MC_MODIFY] = "Modify Multicast Group Cmd",
	[IRDMA_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd",
	[IRDMA_OP_STATS_FREE] = "Free Statistics Instance Cmd",
	[IRDMA_OP_STATS_GATHER] = "Gather Statistics Cmd",
	[IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd",
	[IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd",
	[IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd",
	[IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd",
	[IRDMA_OP_GEN_AE] = "Generate AE Cmd",
	[IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd",
	[IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY] = "Allocate Local MAC Entry Cmd",
	[IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
	[IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
	[IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
	[IRDMA_OP_SRQ_CREATE] = "Create SRQ Cmd",
	[IRDMA_OP_SRQ_MODIFY] = "Modify SRQ Cmd",
	[IRDMA_OP_SRQ_DESTROY] = "Destroy SRQ Cmd",
};

static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
	{0xffff, 0x8002, "Invalid State"},
	{0xffff, 0x8006, "Flush No Wqe Pending"},
	{0xffff, 0x8007, "Modify QP Bad Close"},
	{0xffff, 0x8009, "LLP Closed"},
	{0xffff, 0x800a, "Reset Not Sent"}
};

/**
 * irdma_cqp_crit_err - check if CQP error is critical
 * @dev: pointer to dev structure
 * @cqp_cmd: code for last CQP operation
 * @maj_err_code: major error code
 * @min_err_code: minor error code
 */
bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
			u16 maj_err_code, u16 min_err_code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(irdma_noncrit_err_list); ++i) {
		if (maj_err_code == irdma_noncrit_err_list[i].maj &&
		    min_err_code == irdma_noncrit_err_list[i].min) {
			ibdev_dbg(to_ibdev(dev),
				  "CQP: [%s Error][%s] maj=0x%x min=0x%x\n",
				  irdma_noncrit_err_list[i].desc,
				  irdma_cqp_cmd_names[cqp_cmd], maj_err_code,
				  min_err_code);
			return false;
		}
	}
	return true;
}
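
/*
 * Example: a CQP command completing with maj=0xffff/min=0x8002 ("Invalid
 * State", e.g. a modify-QP racing with a QP already in the target state)
 * matches the non-critical table above, so irdma_cqp_crit_err() returns
 * false and the caller below only emits a debug message, not ibdev_err().
 */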

/**
 * irdma_handle_cqp_op - process cqp command
 * @rf: RDMA PCI function
 * @cqp_request: cqp request to process
 */
int irdma_handle_cqp_op(struct irdma_pci_f *rf,
			struct irdma_cqp_request *cqp_request)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct cqp_cmds_info *info = &cqp_request->info;
	int status;
	bool put_cqp_request = true;

	if (rf->reset)
		return -EBUSY;

	irdma_get_cqp_request(cqp_request);
	status = irdma_process_cqp_cmd(dev, info);
	if (status)
		goto err;

	if (cqp_request->waiting) {
		put_cqp_request = false;
		status = irdma_wait_event(rf, cqp_request);
		if (status)
			goto err;
	}

	return 0;

err:
	if (irdma_cqp_crit_err(dev, info->cqp_cmd,
			       cqp_request->compl_info.maj_err_code,
			       cqp_request->compl_info.min_err_code))
		ibdev_err(&rf->iwdev->ibdev,
			  "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n",
			  irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status, cqp_request->waiting,
			  cqp_request->compl_info.error, cqp_request->compl_info.maj_err_code,
			  cqp_request->compl_info.min_err_code);

	if (put_cqp_request)
		irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

void irdma_qp_add_ref(struct ib_qp *ibqp)
{
	struct irdma_qp *iwqp = to_iwqp(ibqp);

	refcount_inc(&iwqp->refcnt);
}

void irdma_qp_rem_ref(struct ib_qp *ibqp)
{
	struct irdma_qp *iwqp = to_iwqp(ibqp);
	struct irdma_device *iwdev = iwqp->iwdev;
	u32 qp_num;
	unsigned long flags;

	spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
	if (!refcount_dec_and_test(&iwqp->refcnt)) {
		spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
		return;
	}

	qp_num = iwqp->ibqp.qp_num;
	iwdev->rf->qp_table[qp_num] = NULL;
	spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
	complete(&iwqp->free_qp);
}
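
/*
 * Pairing sketch (assumed usage, consistent with the terminate timer later
 * in this file): any asynchronous path that stashes a QP pointer takes a
 * reference first and drops it when done, so destroy blocks on free_qp
 * until the last user finishes:
 *
 *	irdma_qp_add_ref(&iwqp->ibqp);
 *	... hand iwqp to deferred work ...
 *	irdma_qp_rem_ref(&iwqp->ibqp);
 */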

void irdma_cq_add_ref(struct ib_cq *ibcq)
{
	struct irdma_cq *iwcq = to_iwcq(ibcq);

	refcount_inc(&iwcq->refcnt);
}

void irdma_cq_rem_ref(struct ib_cq *ibcq)
{
	struct ib_device *ibdev = ibcq->device;
	struct irdma_device *iwdev = to_iwdev(ibdev);
	struct irdma_cq *iwcq = to_iwcq(ibcq);
	unsigned long flags;

	spin_lock_irqsave(&iwdev->rf->cqtable_lock, flags);
	if (!refcount_dec_and_test(&iwcq->refcnt)) {
		spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
		return;
	}

	/* May be asynchronously sampled by the CEQ ISR without holding the
	 * table lock.
	 */
	WRITE_ONCE(iwdev->rf->cq_table[iwcq->cq_num], NULL);
	spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
	complete(&iwcq->free_cq);
}

struct ib_device *to_ibdev(struct irdma_sc_dev *dev)
{
	return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
}

/**
 * irdma_get_qp - get qp address
 * @device: iwarp device
 * @qpn: qp number
 */
struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn)
{
	struct irdma_device *iwdev = to_iwdev(device);

	if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp)
		return NULL;

	return &iwdev->rf->qp_table[qpn]->ibqp;
}

/**
 * irdma_remove_cqp_head - return head entry and remove
 * @dev: device
 */
void *irdma_remove_cqp_head(struct irdma_sc_dev *dev)
{
	struct list_head *entry;
	struct list_head *list = &dev->cqp_cmd_head;

	if (list_empty(list))
		return NULL;

	entry = list->next;
	list_del(entry);

	return entry;
}

/**
 * irdma_cqp_sds_cmd - create cqp command for sd
 * @dev: hardware control device structure
 * @sdinfo: information for sd cqp
 */
int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
		      struct irdma_update_sds_info *sdinfo)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_pci_f *rf = dev_to_rf(dev);
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
	       sizeof(cqp_info->in.u.update_pe_sds.info));
	cqp_info->cqp_cmd = IRDMA_OP_UPDATE_PE_SDS;
	cqp_info->post_sq = 1;
	cqp_info->in.u.update_pe_sds.dev = dev;
	cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

/**
 * irdma_cqp_qp_suspend_resume - cqp command for suspend/resume
 * @qp: hardware control qp
 * @op: suspend or resume
 */
int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 op)
{
	struct irdma_sc_dev *dev = qp->dev;
	struct irdma_cqp_request *cqp_request;
	struct irdma_sc_cqp *cqp = dev->cqp;
	struct cqp_cmds_info *cqp_info;
	struct irdma_pci_f *rf = dev_to_rf(dev);
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = op;
	cqp_info->in.u.suspend_resume.cqp = cqp;
	cqp_info->in.u.suspend_resume.qp = qp;
	cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

/**
 * irdma_term_modify_qp - modify qp for term message
 * @qp: hardware control qp
 * @next_state: qp's next state
 * @term: terminate code
 * @term_len: length
 */
void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
			  u8 term_len)
{
	struct irdma_qp *iwqp;

	iwqp = qp->qp_uk.back_qp;
	irdma_next_iw_state(iwqp, next_state, 0, term, term_len);
}

/**
 * irdma_terminate_done - after terminate is completed
 * @qp: hardware control qp
 * @timeout_occurred: indicates if terminate timer expired
 */
void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
{
	struct irdma_qp *iwqp;
	u8 hte = 0;
	bool first_time;
	unsigned long flags;

	iwqp = qp->qp_uk.back_qp;
	spin_lock_irqsave(&iwqp->lock, flags);
	if (iwqp->hte_added) {
		iwqp->hte_added = 0;
		hte = 1;
	}
	first_time = !(qp->term_flags & IRDMA_TERM_DONE);
	qp->term_flags |= IRDMA_TERM_DONE;
	spin_unlock_irqrestore(&iwqp->lock, flags);
	if (first_time) {
		if (!timeout_occurred)
			irdma_terminate_del_timer(qp);

		irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0);
		irdma_cm_disconn(iwqp);
	}
}

static void irdma_terminate_timeout(struct timer_list *t)
{
	struct irdma_qp *iwqp = timer_container_of(iwqp, t, terminate_timer);
	struct irdma_sc_qp *qp = &iwqp->sc_qp;

	irdma_terminate_done(qp, 1);
	irdma_qp_rem_ref(&iwqp->ibqp);
}

/**
 * irdma_terminate_start_timer - start terminate timeout
 * @qp: hardware control qp
 */
void irdma_terminate_start_timer(struct irdma_sc_qp *qp)
{
	struct irdma_qp *iwqp;

	iwqp = qp->qp_uk.back_qp;
	irdma_qp_add_ref(&iwqp->ibqp);
	timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0);
	iwqp->terminate_timer.expires = jiffies + HZ;

	add_timer(&iwqp->terminate_timer);
}

/**
 * irdma_terminate_del_timer - delete terminate timeout
 * @qp: hardware control qp
 */
void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
{
	struct irdma_qp *iwqp;
	int ret;

	iwqp = qp->qp_uk.back_qp;
	ret = timer_delete(&iwqp->terminate_timer);
	if (ret)
		irdma_qp_rem_ref(&iwqp->ibqp);
}

/**
 * irdma_cqp_cq_create_cmd - create a cq for the cqp
 * @dev: device pointer
 * @cq: pointer to created cq
 */
int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(iwcqp, cqp_request);

	return status;
}

/**
 * irdma_cqp_qp_create_cmd - create a qp for the cqp
 * @dev: device pointer
 * @qp: pointer to created qp
 */
int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_create_qp_info *qp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;
	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
	cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(iwcqp, cqp_request);

	return status;
}

/**
 * irdma_dealloc_push_page - free a push page for qp
 * @rf: RDMA PCI function
 * @qp: hardware control qp
 */
static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
				    struct irdma_sc_qp *qp)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 1;
	cqp_info->in.u.manage_push_page.info.push_page_type = 0;
	cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
	status = irdma_handle_cqp_op(rf, cqp_request);
	if (!status)
		qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
	irdma_put_cqp_request(&rf->cqp, cqp_request);
}

static void irdma_free_gsi_qp_rsrc(struct irdma_qp *iwqp, u32 qp_num)
{
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_pci_f *rf = iwdev->rf;
	unsigned long flags;

	if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
		return;

	irdma_vchnl_req_del_vport(&rf->sc_dev, iwdev->vport_id, qp_num);

	if (qp_num == 1) {
		spin_lock_irqsave(&rf->rsrc_lock, flags);
		rf->hwqp1_rsvd = false;
		spin_unlock_irqrestore(&rf->rsrc_lock, flags);
	} else if (qp_num > 2) {
		irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
	}
}

/**
 * irdma_free_qp_rsrc - free up memory resources for qp
 * @iwqp: qp ptr (user or kernel)
 */
void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
{
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_pci_f *rf = iwdev->rf;
	u32 qp_num = iwqp->sc_qp.qp_uk.qp_id;

	irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
	irdma_dealloc_push_page(rf, &iwqp->sc_qp);
	if (iwqp->sc_qp.vsi) {
		irdma_qp_rem_qos(&iwqp->sc_qp);
		iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
					   iwqp->sc_qp.user_pri);
	}

	if (iwqp->ibqp.qp_type == IB_QPT_GSI) {
		irdma_free_gsi_qp_rsrc(iwqp, qp_num);
	} else {
		if (qp_num > 2)
			irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
	}
	dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
			  iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
	iwqp->q2_ctx_mem.va = NULL;
	dma_free_coherent(rf->sc_dev.hw->device, iwqp->kqp.dma_mem.size,
			  iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa);
	iwqp->kqp.dma_mem.va = NULL;
	kfree(iwqp->kqp.sq_wrid_mem);
	kfree(iwqp->kqp.rq_wrid_mem);
}

/**
 * irdma_srq_wq_destroy - send srq destroy cqp
 * @rf: RDMA PCI function
 * @srq: hardware control srq
 */
void irdma_srq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_srq *srq)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_SRQ_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.srq_destroy.srq = srq;
	cqp_info->in.u.srq_destroy.scratch = (uintptr_t)cqp_request;

	irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);
}

/**
 * irdma_cq_wq_destroy - send cq destroy cqp
 * @rf: RDMA PCI function
 * @cq: hardware control cq
 */
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_CQ_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_destroy.cq = cq;
	cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;

	irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);
}

/**
 * irdma_hw_modify_qp_callback - handle state for modifyQPs that don't wait
 * @cqp_request: modify QP completion
 */
static void irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request)
{
	struct cqp_cmds_info *cqp_info;
	struct irdma_qp *iwqp;

	cqp_info = &cqp_request->info;
	iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp;
	atomic_dec(&iwqp->hw_mod_qp_pend);
	wake_up(&iwqp->mod_qp_waitq);
}

/**
 * irdma_hw_modify_qp - setup cqp for modify qp
 * @iwdev: RDMA device
 * @iwqp: qp ptr (user or kernel)
 * @info: info for modify qp
 * @wait: flag to wait or not for modify qp completion
 */
int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
		       struct irdma_modify_qp_info *info, bool wait)
{
	int status;
	struct irdma_pci_f *rf = iwdev->rf;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_modify_qp_info *m_info;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
	if (!cqp_request)
		return -ENOMEM;

	if (!wait) {
		cqp_request->callback_fcn = irdma_hw_modify_qp_callback;
		atomic_inc(&iwqp->hw_mod_qp_pend);
	}
	cqp_info = &cqp_request->info;
	m_info = &cqp_info->in.u.qp_modify.info;
	memcpy(m_info, info, sizeof(*m_info));
	cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);
	if (status) {
		if (rdma_protocol_roce(&iwdev->ibdev, 1))
			return status;

		switch (m_info->next_iwarp_state) {
			struct irdma_gen_ae_info ae_info;

		case IRDMA_QP_STATE_RTS:
		case IRDMA_QP_STATE_IDLE:
		case IRDMA_QP_STATE_TERMINATE:
		case IRDMA_QP_STATE_CLOSING:
			if (info->curr_iwarp_state == IRDMA_QP_STATE_IDLE)
				irdma_send_reset(iwqp->cm_node);
			else
				iwqp->sc_qp.term_flags = IRDMA_TERM_DONE;
			if (!wait) {
				ae_info.ae_code = IRDMA_AE_BAD_CLOSE;
				ae_info.ae_src = 0;
				irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
			} else {
				cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp,
									      wait);
				if (!cqp_request)
					return -ENOMEM;

				cqp_info = &cqp_request->info;
				m_info = &cqp_info->in.u.qp_modify.info;
				memcpy(m_info, info, sizeof(*m_info));
				cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
				cqp_info->post_sq = 1;
				cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
				cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
				m_info->next_iwarp_state = IRDMA_QP_STATE_ERROR;
				m_info->reset_tcp_conn = true;
				irdma_handle_cqp_op(rf, cqp_request);
				irdma_put_cqp_request(&rf->cqp, cqp_request);
			}
			break;
		case IRDMA_QP_STATE_ERROR:
		default:
			break;
		}
	}

	return status;
}
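
/*
 * Usage note (illustrative; field values depend on the caller): moving a QP
 * to ERROR without blocking uses the async path, and the pending counter
 * lets a later teardown path wait for the callback above to fire:
 *
 *	info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
 *	irdma_hw_modify_qp(iwdev, iwqp, &info, false);
 *	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
 */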

/**
 * irdma_cqp_cq_destroy_cmd - destroy the cqp cq
 * @dev: device pointer
 * @cq: pointer to cq
 */
void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);

	irdma_cq_wq_destroy(rf, cq);
}

/**
 * irdma_cqp_qp_destroy_cmd - destroy the cqp
 * @dev: device pointer
 * @qp: pointer to qp
 */
int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_destroy.qp = qp;
	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.qp_destroy.remove_hash_idx = true;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

/**
 * irdma_ieq_mpa_crc_ae - generate AE for crc error
 * @dev: hardware control device structure
 * @qp: hardware control qp
 */
void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
{
	struct irdma_gen_ae_info info = {};
	struct irdma_pci_f *rf = dev_to_rf(dev);

	ibdev_dbg(&rf->iwdev->ibdev, "AEQ: Generate MPA CRC AE\n");
	info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR;
	info.ae_src = IRDMA_AE_SOURCE_RQ;
	irdma_gen_ae(rf, qp, &info, false);
}

/**
 * irdma_ieq_check_mpacrc - check if mpa crc is OK
 * @addr: address of buffer for crc
 * @len: length of buffer
 * @val: value to be compared
 */
int irdma_ieq_check_mpacrc(const void *addr, u32 len, u32 val)
{
	if ((__force u32)cpu_to_le32(~crc32c(~0, addr, len)) != val)
		return -EINVAL;

	return 0;
}
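
/*
 * Sketch of the intended use (buffer layout is illustrative): an MPA frame
 * carries a CRC32C trailer computed over the bytes that precede it, so a
 * received FPDU of fpdu_len bytes would be validated and, on mismatch,
 * escalated via the AE helper above:
 *
 *	u32 crc = *(u32 *)(fpdu + fpdu_len - 4);
 *
 *	if (irdma_ieq_check_mpacrc(fpdu, fpdu_len - 4, crc))
 *		irdma_ieq_mpa_crc_ae(dev, qp);
 */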

/**
 * irdma_ieq_get_qp - get qp based on quad in puda buffer
 * @dev: hardware control device structure
 * @buf: receive puda buffer on exception q
 */
struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
				     struct irdma_puda_buf *buf)
{
	struct irdma_qp *iwqp;
	struct irdma_cm_node *cm_node;
	struct irdma_device *iwdev = buf->vsi->back_vsi;
	u32 loc_addr[4] = {};
	u32 rem_addr[4] = {};
	u16 loc_port, rem_port;
	struct ipv6hdr *ip6h;
	struct iphdr *iph = (struct iphdr *)buf->iph;
	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;

	if (iph->version == 4) {
		loc_addr[0] = ntohl(iph->daddr);
		rem_addr[0] = ntohl(iph->saddr);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		irdma_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
		irdma_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
	}
	loc_port = ntohs(tcph->dest);
	rem_port = ntohs(tcph->source);
	cm_node = irdma_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
				  loc_addr, buf->vlan_valid ? buf->vlan_id : 0xFFFF);
	if (!cm_node)
		return NULL;

	iwqp = cm_node->iwqp;
	irdma_rem_ref_cm_node(cm_node);

	return &iwqp->sc_qp;
}

/**
 * irdma_send_ieq_ack - ACKs for duplicate or OOO partial FPDUs
 * @qp: qp ptr
 */
void irdma_send_ieq_ack(struct irdma_sc_qp *qp)
{
	struct irdma_cm_node *cm_node = ((struct irdma_qp *)qp->qp_uk.back_qp)->cm_node;
	struct irdma_puda_buf *buf = qp->pfpdu.lastrcv_buf;
	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;

	cm_node->tcp_cntxt.rcv_nxt = qp->pfpdu.nextseqnum;
	cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);

	irdma_send_ack(cm_node);
}

/**
 * irdma_puda_ieq_get_ah_info - get AH info from IEQ buffer
 * @qp: qp pointer
 * @ah_info: AH info pointer
 */
void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
				struct irdma_ah_info *ah_info)
{
	struct irdma_puda_buf *buf = qp->pfpdu.ah_buf;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;

	memset(ah_info, 0, sizeof(*ah_info));
	ah_info->do_lpbk = true;
	ah_info->vlan_tag = buf->vlan_id;
	ah_info->insert_vlan_tag = buf->vlan_valid;
	ah_info->ipv4_valid = buf->ipv4;
	ah_info->vsi = qp->vsi;

	if (buf->smac_valid)
		ether_addr_copy(ah_info->mac_addr, buf->smac);

	if (buf->ipv4) {
		ah_info->ipv4_valid = true;
		iph = (struct iphdr *)buf->iph;
		ah_info->hop_ttl = iph->ttl;
		ah_info->tc_tos = iph->tos;
		ah_info->dest_ip_addr[0] = ntohl(iph->daddr);
		ah_info->src_ip_addr[0] = ntohl(iph->saddr);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		ah_info->hop_ttl = ip6h->hop_limit;
		ah_info->tc_tos = ip6h->priority;
		irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
				    ip6h->daddr.in6_u.u6_addr32);
		irdma_copy_ip_ntohl(ah_info->src_ip_addr,
				    ip6h->saddr.in6_u.u6_addr32);
	}

	ah_info->dst_arpindex = irdma_arp_table(dev_to_rf(qp->dev),
						ah_info->dest_ip_addr,
						ah_info->ipv4_valid,
						NULL, IRDMA_ARP_RESOLVE);
}

/**
 * irdma_gen1_ieq_update_tcpip_info - update tcpip in the buffer
 * @buf: puda to update
 * @len: length of buffer
 * @seqnum: seq number for tcp
 */
static void irdma_gen1_ieq_update_tcpip_info(struct irdma_puda_buf *buf,
					     u16 len, u32 seqnum)
{
	struct tcphdr *tcph;
	struct iphdr *iph;
	u16 iphlen;
	u16 pktsize;
	u8 *addr = buf->mem.va;

	iphlen = (buf->ipv4) ? 20 : 40;
	iph = (struct iphdr *)(addr + buf->maclen);
	tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
	pktsize = len + buf->tcphlen + iphlen;
	iph->tot_len = htons(pktsize);
	tcph->seq = htonl(seqnum);
}

/**
 * irdma_ieq_update_tcpip_info - update tcpip in the buffer
 * @buf: puda to update
 * @len: length of buffer
 * @seqnum: seq number for tcp
 */
void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
				 u32 seqnum)
{
	struct tcphdr *tcph;
	u8 *addr;

	if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
		return irdma_gen1_ieq_update_tcpip_info(buf, len, seqnum);

	addr = buf->mem.va;
	tcph = (struct tcphdr *)addr;
	tcph->seq = htonl(seqnum);
}

/**
 * irdma_gen1_puda_get_tcpip_info - get tcpip info from puda
 * buffer
 * @info: to get information
 * @buf: puda buffer
 */
static int irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
					  struct irdma_puda_buf *buf)
{
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcphdr *tcph;
	u16 iphlen;
	u16 pkt_len;
	u8 *mem = buf->mem.va;
	struct ethhdr *ethh = buf->mem.va;

	if (ethh->h_proto == htons(ETH_P_8021Q)) {
		info->vlan_valid = true;
		buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) &
			       VLAN_VID_MASK;
	}

	buf->maclen = (info->vlan_valid) ? 18 : 14;
	iphlen = (info->l3proto) ? 40 : 20;
	buf->ipv4 = (info->l3proto) ? false : true;
	buf->iph = mem + buf->maclen;
	iph = (struct iphdr *)buf->iph;
	buf->tcph = buf->iph + iphlen;
	tcph = (struct tcphdr *)buf->tcph;

	if (buf->ipv4) {
		pkt_len = ntohs(iph->tot_len);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		pkt_len = ntohs(ip6h->payload_len) + iphlen;
	}

	buf->totallen = pkt_len + buf->maclen;

	if (info->payload_len < buf->totallen) {
		ibdev_dbg(to_ibdev(buf->vsi->dev),
			  "ERR: payload_len = 0x%x totallen expected 0x%x\n",
			  info->payload_len, buf->totallen);
		return -EINVAL;
	}

	buf->tcphlen = tcph->doff << 2;
	buf->datalen = pkt_len - iphlen - buf->tcphlen;
	buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
	buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
	buf->seqnum = ntohl(tcph->seq);

	return 0;
}
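
/*
 * Worked example of the length math above (values are illustrative): for an
 * untagged IPv4 frame whose TCP header has doff = 8 (32 bytes of header
 * plus options) and iph->tot_len = 100:
 *
 *	maclen   = 14                 (no 802.1Q tag)
 *	iphlen   = 20                 (IPv4)
 *	tcphlen  = 8 << 2 = 32
 *	datalen  = 100 - 20 - 32 = 48 payload bytes
 *	hdrlen   = 14 + 20 + 32 = 66
 *	totallen = 100 + 14 = 114
 */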

/**
 * irdma_puda_get_tcpip_info - get tcpip info from puda buffer
 * @info: to get information
 * @buf: puda buffer
 */
int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
			      struct irdma_puda_buf *buf)
{
	struct tcphdr *tcph;
	u32 pkt_len;
	u8 *mem;

	if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
		return irdma_gen1_puda_get_tcpip_info(info, buf);

	mem = buf->mem.va;
	buf->vlan_valid = info->vlan_valid;
	if (info->vlan_valid)
		buf->vlan_id = info->vlan;

	buf->ipv4 = info->ipv4;
	if (buf->ipv4)
		buf->iph = mem + IRDMA_IPV4_PAD;
	else
		buf->iph = mem;

	buf->tcph = mem + IRDMA_TCP_OFFSET;
	tcph = (struct tcphdr *)buf->tcph;
	pkt_len = info->payload_len;
	buf->totallen = pkt_len;
	buf->tcphlen = tcph->doff << 2;
	buf->datalen = pkt_len - IRDMA_TCP_OFFSET - buf->tcphlen;
	buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
	buf->hdrlen = IRDMA_TCP_OFFSET + buf->tcphlen;
	buf->seqnum = ntohl(tcph->seq);

	if (info->smac_valid) {
		ether_addr_copy(buf->smac, info->smac);
		buf->smac_valid = true;
	}

	return 0;
}

/**
 * irdma_hw_stats_timeout - Stats timer-handler which updates all HW stats
 * @t: timer_list pointer
 */
static void irdma_hw_stats_timeout(struct timer_list *t)
{
	struct irdma_vsi_pestat *pf_devstat =
		timer_container_of(pf_devstat, t, stats_timer);
	struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;

	if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
		irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false);
	else
		irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat);

	mod_timer(&pf_devstat->stats_timer,
		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * irdma_hw_stats_start_timer - Start periodic stats timer
 * @vsi: vsi structure pointer
 */
void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi)
{
	struct irdma_vsi_pestat *devstat = vsi->pestat;

	timer_setup(&devstat->stats_timer, irdma_hw_stats_timeout, 0);
	mod_timer(&devstat->stats_timer,
		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * irdma_hw_stats_stop_timer - Delete periodic stats timer
 * @vsi: pointer to vsi structure
 */
void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi)
{
	struct irdma_vsi_pestat *devstat = vsi->pestat;

	timer_delete_sync(&devstat->stats_timer);
}

/**
 * irdma_process_stats - Checking for wrap and update stats
 * @pestat: stats structure pointer
 */
static inline void irdma_process_stats(struct irdma_vsi_pestat *pestat)
{
	sc_vsi_update_stats(pestat->vsi);
}

/**
 * irdma_cqp_gather_stats_gen1 - Gather stats
 * @dev: pointer to device structure
 * @pestat: statistics structure
 */
void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
				 struct irdma_vsi_pestat *pestat)
{
	struct irdma_gather_stats *gather_stats =
		pestat->gather_info.gather_stats_va;
	const struct irdma_hw_stat_map *map = dev->hw_stats_map;
	u16 max_stats_idx = dev->hw_attrs.max_stat_idx;
	u32 stats_inst_offset_32;
	u32 stats_inst_offset_64;
	u64 new_val;
	u16 i;

	stats_inst_offset_32 = (pestat->gather_info.use_stats_inst) ?
				       pestat->gather_info.stats_inst_index :
				       pestat->hw->hmc.hmc_fn_id;
	stats_inst_offset_32 *= 4;
	stats_inst_offset_64 = stats_inst_offset_32 * 2;

	for (i = 0; i < max_stats_idx; i++) {
		if (map[i].bitmask <= IRDMA_MAX_STATS_32)
			new_val = rd32(dev->hw,
				       dev->hw_stats_regs[i] + stats_inst_offset_32);
		else
			new_val = rd64(dev->hw,
				       dev->hw_stats_regs[i] + stats_inst_offset_64);
		gather_stats->val[map[i].byteoff / sizeof(u64)] = new_val;
	}

	irdma_process_stats(pestat);
}
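
/*
 * Offset arithmetic, spelled out (per the code above): each stats instance
 * owns one 32-bit slot (4 bytes) and one 64-bit slot (8 bytes) per counter
 * register, so instance n reads at base + n * 4 for 32-bit counters and
 * base + n * 8 for 64-bit ones; e.g. instance 3 yields offsets 12 and 24.
 */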
1720
1721 /**
1722 * irdma_process_cqp_stats - Checking for wrap and update stats
1723 * @cqp_request: cqp_request structure pointer
1724 */
irdma_process_cqp_stats(struct irdma_cqp_request * cqp_request)1725 static void irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request)
1726 {
1727 struct irdma_vsi_pestat *pestat = cqp_request->param;
1728
1729 irdma_process_stats(pestat);
1730 }
1731
1732 /**
1733 * irdma_cqp_gather_stats_cmd - Gather stats
1734 * @dev: pointer to device structure
1735 * @pestat: pointer to stats info
1736 * @wait: flag to wait or not wait for stats
1737 */
irdma_cqp_gather_stats_cmd(struct irdma_sc_dev * dev,struct irdma_vsi_pestat * pestat,bool wait)1738 int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
1739 struct irdma_vsi_pestat *pestat, bool wait)
1740
1741 {
1742 struct irdma_pci_f *rf = dev_to_rf(dev);
1743 struct irdma_cqp *iwcqp = &rf->cqp;
1744 struct irdma_cqp_request *cqp_request;
1745 struct cqp_cmds_info *cqp_info;
1746 int status;
1747
1748 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
1749 if (!cqp_request)
1750 return -ENOMEM;
1751
1752 cqp_info = &cqp_request->info;
1753 cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
1754 cqp_info->post_sq = 1;
1755 cqp_info->in.u.stats_gather.info = pestat->gather_info;
1756 cqp_info->in.u.stats_gather.scratch = (uintptr_t)cqp_request;
1757 cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp;
1758 cqp_request->param = pestat;
1759 if (!wait)
1760 cqp_request->callback_fcn = irdma_process_cqp_stats;
1761 status = irdma_handle_cqp_op(rf, cqp_request);
1762 if (wait)
1763 irdma_process_stats(pestat);
1764 irdma_put_cqp_request(&rf->cqp, cqp_request);
1765
1766 return status;
1767 }
1768
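/*
 * Illustrative sketch (editorial, not driver code): how a caller might
 * use the two gather modes. With wait == true the stats are processed
 * before the call returns; with wait == false irdma_process_cqp_stats()
 * runs later from the CQP completion callback.
 */
static int __maybe_unused example_gather_stats(struct irdma_sc_dev *dev,
					       struct irdma_vsi_pestat *pestat)
{
	int err;

	/* synchronous: counters are up to date on return */
	err = irdma_cqp_gather_stats_cmd(dev, pestat, true);
	if (err)
		return err;

	/* asynchronous: processed from the completion callback */
	return irdma_cqp_gather_stats_cmd(dev, pestat, false);
}
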
1769 /**
1770 * irdma_cqp_stats_inst_cmd - Allocate/free stats instance
1771 * @vsi: pointer to vsi structure
1772 * @cmd: command to allocate or free
1773 * @stats_info: pointer to allocate stats info
1774 */
1775 int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
1776 struct irdma_stats_inst_info *stats_info)
1777 {
1778 struct irdma_pci_f *rf = dev_to_rf(vsi->dev);
1779 struct irdma_cqp *iwcqp = &rf->cqp;
1780 struct irdma_cqp_request *cqp_request;
1781 struct cqp_cmds_info *cqp_info;
1782 int status;
1783 bool wait = false;
1784
1785 if (cmd == IRDMA_OP_STATS_ALLOCATE)
1786 wait = true;
1787 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
1788 if (!cqp_request)
1789 return -ENOMEM;
1790
1791 cqp_info = &cqp_request->info;
1792 cqp_info->cqp_cmd = cmd;
1793 cqp_info->post_sq = 1;
1794 cqp_info->in.u.stats_manage.info = *stats_info;
1795 cqp_info->in.u.stats_manage.scratch = (uintptr_t)cqp_request;
1796 cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp;
1797 status = irdma_handle_cqp_op(rf, cqp_request);
1798 if (wait)
1799 stats_info->stats_idx = cqp_request->compl_info.op_ret_val;
1800 irdma_put_cqp_request(iwcqp, cqp_request);
1801
1802 return status;
1803 }
1804
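/*
 * Illustrative sketch (editorial, not driver code): allocate a stats
 * instance, use the returned index, then free it. The free opcode name
 * IRDMA_OP_STATS_FREE is assumed here; only the allocate path waits,
 * since only it returns a value (stats_idx).
 */
static int __maybe_unused example_stats_inst(struct irdma_sc_vsi *vsi,
					     struct irdma_stats_inst_info *info)
{
	int err;

	err = irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE, info);
	if (err)
		return err;

	/* ... program info->stats_idx into the stats gather info ... */

	return irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE, info);
}
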
1805 /**
1806 * irdma_cqp_ceq_cmd - Create/Destroy CEQs after CEQ 0
1807 * @dev: pointer to device info
1808 * @sc_ceq: pointer to ceq structure
1809 * @op: Create or Destroy
1810 */
1811 int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
1812 u8 op)
1813 {
1814 struct irdma_cqp_request *cqp_request;
1815 struct cqp_cmds_info *cqp_info;
1816 struct irdma_pci_f *rf = dev_to_rf(dev);
1817 int status;
1818
1819 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1820 if (!cqp_request)
1821 return -ENOMEM;
1822
1823 cqp_info = &cqp_request->info;
1824 cqp_info->post_sq = 1;
1825 cqp_info->cqp_cmd = op;
1826 cqp_info->in.u.ceq_create.ceq = sc_ceq;
1827 cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request;
1828
1829 status = irdma_handle_cqp_op(rf, cqp_request);
1830 irdma_put_cqp_request(&rf->cqp, cqp_request);
1831
1832 return status;
1833 }
1834
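/*
 * Illustrative sketch (editorial, not driver code): create/destroy of a
 * secondary CEQ through the wrapper above; the opcode names
 * IRDMA_OP_CEQ_CREATE and IRDMA_OP_CEQ_DESTROY are assumed.
 */
static int __maybe_unused example_ceq_cycle(struct irdma_sc_dev *dev,
					    struct irdma_sc_ceq *ceq)
{
	int err;

	err = irdma_cqp_ceq_cmd(dev, ceq, IRDMA_OP_CEQ_CREATE);
	if (err)
		return err;

	/* ... CEQ in use ... */
	return irdma_cqp_ceq_cmd(dev, ceq, IRDMA_OP_CEQ_DESTROY);
}
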
1835 /**
1836 * irdma_cqp_aeq_cmd - Create/Destroy AEQ
1837 * @dev: pointer to device info
1838 * @sc_aeq: pointer to aeq structure
1839 * @op: Create or Destroy
1840 */
1841 int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
1842 u8 op)
1843 {
1844 struct irdma_cqp_request *cqp_request;
1845 struct cqp_cmds_info *cqp_info;
1846 struct irdma_pci_f *rf = dev_to_rf(dev);
1847 int status;
1848
1849 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1850 if (!cqp_request)
1851 return -ENOMEM;
1852
1853 cqp_info = &cqp_request->info;
1854 cqp_info->post_sq = 1;
1855 cqp_info->cqp_cmd = op;
1856 cqp_info->in.u.aeq_create.aeq = sc_aeq;
1857 cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request;
1858
1859 status = irdma_handle_cqp_op(rf, cqp_request);
1860 irdma_put_cqp_request(&rf->cqp, cqp_request);
1861
1862 return status;
1863 }
1864
1865 /**
1866 * irdma_cqp_ws_node_cmd - Add/modify/delete ws node
1867 * @dev: pointer to device structure
1868 * @cmd: Add, modify or delete
1869 * @node_info: pointer to ws node info
1870 */
1871 int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
1872 struct irdma_ws_node_info *node_info)
1873 {
1874 struct irdma_pci_f *rf = dev_to_rf(dev);
1875 struct irdma_cqp *iwcqp = &rf->cqp;
1876 struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
1877 struct irdma_cqp_request *cqp_request;
1878 struct cqp_cmds_info *cqp_info;
1879 int status;
1880 bool poll;
1881
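	/* before CEQ 0 is operational (ceq_valid == false) CQP completion
	 * events cannot be delivered, so the result must be polled
	 */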
1882 if (!rf->sc_dev.ceq_valid)
1883 poll = true;
1884 else
1885 poll = false;
1886
1887 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll);
1888 if (!cqp_request)
1889 return -ENOMEM;
1890
1891 cqp_info = &cqp_request->info;
1892 cqp_info->cqp_cmd = cmd;
1893 cqp_info->post_sq = 1;
1894 cqp_info->in.u.ws_node.info = *node_info;
1895 cqp_info->in.u.ws_node.cqp = cqp;
1896 cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request;
1897 status = irdma_handle_cqp_op(rf, cqp_request);
1898 if (status)
1899 goto exit;
1900
1901 if (poll) {
1902 struct irdma_ccq_cqe_info compl_info;
1903
1904 status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE,
1905 &compl_info);
1906 node_info->qs_handle = compl_info.op_ret_val;
1907 ibdev_dbg(&rf->iwdev->ibdev, "DCB: opcode=%d, compl_info.retval=%d\n",
1908 compl_info.op_code, compl_info.op_ret_val);
1909 } else {
1910 node_info->qs_handle = cqp_request->compl_info.op_ret_val;
1911 }
1912
1913 exit:
1914 irdma_put_cqp_request(&rf->cqp, cqp_request);
1915
1916 return status;
1917 }
1918
1919 /**
1920 * irdma_ah_cqp_op - perform an AH cqp operation
1921 * @rf: RDMA PCI function
1922 * @sc_ah: address handle
1923 * @cmd: AH operation
1924 * @wait: wait if true
1925 * @callback_fcn: Callback function on CQP op completion
1926 * @cb_param: parameter for callback function
1927 *
1928 * Return: 0 on success, negative errno on failure
1929 */
1930 int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
1931 bool wait,
1932 void (*callback_fcn)(struct irdma_cqp_request *),
1933 void *cb_param)
1934 {
1935 struct irdma_cqp_request *cqp_request;
1936 struct cqp_cmds_info *cqp_info;
1937 int status;
1938
1939 if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY)
1940 return -EINVAL;
1941
1942 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
1943 if (!cqp_request)
1944 return -ENOMEM;
1945
1946 cqp_info = &cqp_request->info;
1947 cqp_info->cqp_cmd = cmd;
1948 cqp_info->post_sq = 1;
1949 if (cmd == IRDMA_OP_AH_CREATE) {
1950 cqp_info->in.u.ah_create.info = sc_ah->ah_info;
1951 cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request;
1952 cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
1953 } else if (cmd == IRDMA_OP_AH_DESTROY) {
1954 cqp_info->in.u.ah_destroy.info = sc_ah->ah_info;
1955 cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request;
1956 cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp;
1957 }
1958
1959 if (!wait) {
1960 cqp_request->callback_fcn = callback_fcn;
1961 cqp_request->param = cb_param;
1962 }
1963 status = irdma_handle_cqp_op(rf, cqp_request);
1964 irdma_put_cqp_request(&rf->cqp, cqp_request);
1965
1966 if (status)
1967 return -ENOMEM;
1968
1969 if (wait)
1970 sc_ah->ah_info.ah_valid = (cmd == IRDMA_OP_AH_CREATE);
1971
1972 return 0;
1973 }
1974
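/*
 * Illustrative sketch (editorial, not driver code): a synchronous AH
 * destroy through irdma_ah_cqp_op(); for the asynchronous case a
 * callback such as irdma_ilq_ah_cb() below is passed together with its
 * parameter.
 */
static int __maybe_unused example_destroy_ah(struct irdma_pci_f *rf,
					     struct irdma_sc_ah *sc_ah)
{
	return irdma_ah_cqp_op(rf, sc_ah, IRDMA_OP_AH_DESTROY, true,
			       NULL, NULL);
}
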
1975 /**
1976 * irdma_ieq_ah_cb - callback after creation of AH for IEQ
1977 * @cqp_request: pointer to cqp_request of create AH
1978 */
1979 static void irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request)
1980 {
1981 struct irdma_sc_qp *qp = cqp_request->param;
1982 struct irdma_sc_ah *sc_ah = qp->pfpdu.ah;
1983 unsigned long flags;
1984
1985 spin_lock_irqsave(&qp->pfpdu.lock, flags);
1986 if (!cqp_request->compl_info.op_ret_val) {
1987 sc_ah->ah_info.ah_valid = true;
1988 irdma_ieq_process_fpdus(qp, qp->vsi->ieq);
1989 } else {
1990 sc_ah->ah_info.ah_valid = false;
1991 irdma_ieq_cleanup_qp(qp->vsi->ieq, qp);
1992 }
1993 spin_unlock_irqrestore(&qp->pfpdu.lock, flags);
1994 }
1995
1996 /**
1997 * irdma_ilq_ah_cb - callback after creation of AH for ILQ
1998 * @cqp_request: pointer to cqp_request of create AH
1999 */
2000 static void irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request)
2001 {
2002 struct irdma_cm_node *cm_node = cqp_request->param;
2003 struct irdma_sc_ah *sc_ah = cm_node->ah;
2004
2005 sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val;
2006 irdma_add_conn_est_qh(cm_node);
2007 }
2008
2009 /**
2010 * irdma_puda_create_ah - create AH for ILQ/IEQ QPs
2011 * @dev: device pointer
2012 * @ah_info: Address handle info
2013 * @wait: When true will wait for operation to complete
2014 * @type: ILQ/IEQ
2015 * @cb_param: Callback param when not waiting
2016 * @ah_ret: Returned pointer to address handle if created
2017 *
2018 */
2019 int irdma_puda_create_ah(struct irdma_sc_dev *dev,
2020 struct irdma_ah_info *ah_info, bool wait,
2021 enum puda_rsrc_type type, void *cb_param,
2022 struct irdma_sc_ah **ah_ret)
2023 {
2024 struct irdma_sc_ah *ah;
2025 struct irdma_pci_f *rf = dev_to_rf(dev);
2026 int err;
2027
2028 ah = kzalloc_obj(*ah, GFP_ATOMIC);
2029 *ah_ret = ah;
2030 if (!ah)
2031 return -ENOMEM;
2032
2033 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah,
2034 &ah_info->ah_idx, &rf->next_ah);
2035 if (err)
2036 goto err_free;
2037
2038 ah->dev = dev;
2039 ah->ah_info = *ah_info;
2040
2041 if (type == IRDMA_PUDA_RSRC_TYPE_ILQ)
2042 err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
2043 irdma_ilq_ah_cb, cb_param);
2044 else
2045 err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
2046 irdma_ieq_ah_cb, cb_param);
2047
2048 if (err)
2049 goto error;
2050 return 0;
2051
2052 error:
2053 irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
2054 err_free:
2055 kfree(ah);
2056 *ah_ret = NULL;
2057 return -ENOMEM;
2058 }
2059
2060 /**
2061 * irdma_puda_free_ah - free a puda address handle
2062 * @dev: device pointer
2063 * @ah: The address handle to free
2064 */
2065 void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
2066 {
2067 struct irdma_pci_f *rf = dev_to_rf(dev);
2068
2069 if (!ah)
2070 return;
2071
2072 if (ah->ah_info.ah_valid) {
2073 irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL);
2074 irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
2075 }
2076
2077 kfree(ah);
2078 }
2079
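/*
 * Illustrative sketch (editorial, not driver code): create/free pairing
 * for a PUDA AH. The IEQ resource type name IRDMA_PUDA_RSRC_TYPE_IEQ is
 * assumed; irdma_puda_free_ah() above tolerates a NULL handle.
 */
static int __maybe_unused example_puda_ah(struct irdma_sc_dev *dev,
					  struct irdma_ah_info *ah_info,
					  void *cb_param)
{
	struct irdma_sc_ah *ah;
	int err;

	err = irdma_puda_create_ah(dev, ah_info, true,
				   IRDMA_PUDA_RSRC_TYPE_IEQ, cb_param, &ah);
	if (err)
		return err;

	/* ... AH in use by the IEQ ... */
	irdma_puda_free_ah(dev, ah);
	return 0;
}
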
2080 /**
2081 * irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/UD QP
2082 * @cqp_request: pointer to cqp_request of create AH
2083 */
2084 void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request)
2085 {
2086 struct irdma_sc_ah *sc_ah = cqp_request->param;
2087
2088 if (!cqp_request->compl_info.op_ret_val)
2089 sc_ah->ah_info.ah_valid = true;
2090 else
2091 sc_ah->ah_info.ah_valid = false;
2092 }
2093
2094 /**
2095 * irdma_prm_add_pble_mem - add memory to pble resources
2096 * @pprm: pble resource manager
2097 * @pchunk: chunk of memory to add
2098 */
2099 int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
2100 struct irdma_chunk *pchunk)
2101 {
2102 u64 sizeofbitmap;
2103
2104 if (pchunk->size & 0xfff)
2105 return -EINVAL;
2106
2107 sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
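	/* e.g. a 2 MB chunk with pble_shift == 12 yields a 512-bit map,
	 * one bit per 4 KB of pble backing memory
	 */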
2108
2109 pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
2110 if (!pchunk->bitmapbuf)
2111 return -ENOMEM;
2112
2113 pchunk->sizeofbitmap = sizeofbitmap;
2114 /* each pble is 8 bytes hence shift by 3 */
2115 pprm->total_pble_alloc += pchunk->size >> 3;
2116 pprm->free_pble_cnt += pchunk->size >> 3;
2117
2118 return 0;
2119 }
2120
2121 /**
2122 * irdma_prm_get_pbles - get pbles from prm
2123 * @pprm: pble resource manager
2124 * @chunkinfo: information about the chunk where pbles were acquired
2125 * @mem_size: size of pble memory needed
2126 * @vaddr: returns virtual address of pble memory
2127 * @fpm_addr: returns fpm address of pble memory
2128 */
2129 int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
2130 struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
2131 u64 **vaddr, u64 *fpm_addr)
2132 {
2133 u64 bits_needed;
2134 u64 bit_idx = PBLE_INVALID_IDX;
2135 struct irdma_chunk *pchunk = NULL;
2136 struct list_head *chunk_entry = pprm->clist.next;
2137 u32 offset;
2138 unsigned long flags;
2139 *vaddr = NULL;
2140 *fpm_addr = 0;
2141
2142 bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift));
2143
2144 spin_lock_irqsave(&pprm->prm_lock, flags);
2145 while (chunk_entry != &pprm->clist) {
2146 pchunk = (struct irdma_chunk *)chunk_entry;
2147 bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf,
2148 pchunk->sizeofbitmap, 0,
2149 bits_needed, 0);
2150 if (bit_idx < pchunk->sizeofbitmap)
2151 break;
2152
2153 /* advance to the next chunk in the list */
2154 chunk_entry = pchunk->list.next;
2155 }
2156
2157 if (!pchunk || bit_idx >= pchunk->sizeofbitmap) {
2158 spin_unlock_irqrestore(&pprm->prm_lock, flags);
2159 return -ENOMEM;
2160 }
2161
2162 bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed);
2163 offset = bit_idx << pprm->pble_shift;
2164 *vaddr = pchunk->vaddr + offset;
2165 *fpm_addr = pchunk->fpm_addr + offset;
2166
2167 chunkinfo->pchunk = pchunk;
2168 chunkinfo->bit_idx = bit_idx;
2169 chunkinfo->bits_used = bits_needed;
2170 /* each bit covers 2^pble_shift bytes, i.e. 2^(pble_shift - 3) 8-byte pbles */
2171 pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3);
2172 spin_unlock_irqrestore(&pprm->prm_lock, flags);
2173
2174 return 0;
2175 }
2176
2177 /**
2178 * irdma_prm_return_pbles - return pbles back to prm
2179 * @pprm: pble resource manager
2180 * @chunkinfo: chunk where pbles were acquired and are to be freed
2181 */
2182 void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
2183 struct irdma_pble_chunkinfo *chunkinfo)
2184 {
2185 unsigned long flags;
2186
2187 spin_lock_irqsave(&pprm->prm_lock, flags);
2188 pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3);
2189 bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx,
2190 chunkinfo->bits_used);
2191 spin_unlock_irqrestore(&pprm->prm_lock, flags);
2192 }
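
/*
 * Illustrative sketch (editorial, not driver code): acquiring pble
 * backing memory and returning it. chunkinfo records which chunk and
 * bit range were used so irdma_prm_return_pbles() clears exactly those.
 */
static int __maybe_unused example_prm_usage(struct irdma_pble_prm *pprm,
					    u64 mem_size)
{
	struct irdma_pble_chunkinfo chunkinfo = {};
	u64 *va;
	u64 fpm_addr;
	int err;

	err = irdma_prm_get_pbles(pprm, &chunkinfo, mem_size, &va, &fpm_addr);
	if (err)
		return err;

	/* ... write pbles at va, hand fpm_addr to HW ... */
	irdma_prm_return_pbles(pprm, &chunkinfo);
	return 0;
}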
2193
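/**
 * irdma_map_vm_page_list - DMA-map the pages backing a vmalloc buffer
 * @hw: hardware information including registers
 * @va: virtual address of the vmalloc'd buffer
 * @pg_dma: array that receives the DMA address of each page
 * @pg_cnt: number of pages to map
 */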
2194 int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma,
2195 u32 pg_cnt)
2196 {
2197 struct page *vm_page;
2198 int i;
2199 u8 *addr;
2200
2201 addr = (u8 *)(uintptr_t)va;
2202 for (i = 0; i < pg_cnt; i++) {
2203 vm_page = vmalloc_to_page(addr);
2204 if (!vm_page)
2205 goto err;
2206
2207 pg_dma[i] = dma_map_page(hw->device, vm_page, 0, PAGE_SIZE,
2208 DMA_BIDIRECTIONAL);
2209 if (dma_mapping_error(hw->device, pg_dma[i]))
2210 goto err;
2211
2212 addr += PAGE_SIZE;
2213 }
2214
2215 return 0;
2216
2217 err:
2218 irdma_unmap_vm_page_list(hw, pg_dma, i);
2219 return -ENOMEM;
2220 }
2221
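/**
 * irdma_unmap_vm_page_list - unmap pages previously mapped for DMA
 * @hw: hardware information including registers
 * @pg_dma: array of DMA addresses to unmap
 * @pg_cnt: number of pages to unmap
 */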
2222 void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt)
2223 {
2224 int i;
2225
2226 for (i = 0; i < pg_cnt; i++)
2227 dma_unmap_page(hw->device, pg_dma[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
2228 }
2229
2230 /**
2231 * irdma_pble_free_paged_mem - free virtual paged memory
2232 * @chunk: chunk to free with paged memory
2233 */
2234 void irdma_pble_free_paged_mem(struct irdma_chunk *chunk)
2235 {
2236 if (!chunk->pg_cnt)
2237 goto done;
2238
2239 irdma_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs,
2240 chunk->pg_cnt);
2241
2242 done:
2243 kvfree(chunk->dmainfo.dmaaddrs);
2244 chunk->dmainfo.dmaaddrs = NULL;
2245 vfree(chunk->vaddr);
2246 chunk->vaddr = NULL;
2247 chunk->type = 0;
2248 }
2249
2250 /**
2251 * irdma_pble_get_paged_mem - allocate paged memory for pbles
2252 * @chunk: chunk to add for paged memory
2253 * @pg_cnt: number of pages needed
2254 */
2255 int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt)
2256 {
2257 u32 size;
2258 void *va;
2259
2260 chunk->dmainfo.dmaaddrs = kvzalloc(pg_cnt << 3, GFP_KERNEL);
2261 if (!chunk->dmainfo.dmaaddrs)
2262 return -ENOMEM;
2263
2264 size = PAGE_SIZE * pg_cnt;
2265 va = vmalloc(size);
2266 if (!va)
2267 goto err;
2268
2269 if (irdma_map_vm_page_list(chunk->dev->hw, va, chunk->dmainfo.dmaaddrs,
2270 pg_cnt)) {
2271 vfree(va);
2272 goto err;
2273 }
2274 chunk->vaddr = va;
2275 chunk->size = size;
2276 chunk->pg_cnt = pg_cnt;
2277 chunk->type = PBLE_SD_PAGED;
2278
2279 return 0;
2280 err:
2281 kvfree(chunk->dmainfo.dmaaddrs);
2282 chunk->dmainfo.dmaaddrs = NULL;
2283
2284 return -ENOMEM;
2285 }
2286
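/*
 * Illustrative sketch (editorial, not driver code): lifetime of a chunk
 * backed by paged vmalloc memory; the caller is still responsible for
 * adding/removing the chunk in the pble resource manager.
 */
static int __maybe_unused example_paged_chunk(struct irdma_chunk *chunk,
					      u32 pg_cnt)
{
	int err;

	err = irdma_pble_get_paged_mem(chunk, pg_cnt);
	if (err)
		return err;

	/* ... register the chunk and use it ... */
	irdma_pble_free_paged_mem(chunk);
	return 0;
}
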
2287 /**
2288 * irdma_alloc_ws_node_id - Allocate a tx scheduler node ID
2289 * @dev: device pointer
2290 */
2291 u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev)
2292 {
2293 struct irdma_pci_f *rf = dev_to_rf(dev);
2294 u32 next = 1;
2295 u32 node_id;
2296
2297 if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id,
2298 &node_id, &next))
2299 return IRDMA_WS_NODE_INVALID;
2300
2301 return (u16)node_id;
2302 }
2303
2304 /**
2305 * irdma_free_ws_node_id - Free a tx scheduler node ID
2306 * @dev: device pointer
2307 * @node_id: Work scheduler node ID
2308 */
2309 void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id)
2310 {
2311 struct irdma_pci_f *rf = dev_to_rf(dev);
2312
2313 irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id);
2314 }
2315
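/*
 * Illustrative sketch (editorial, not driver code): WS node IDs come
 * from a bitmap allocator; IRDMA_WS_NODE_INVALID signals exhaustion.
 */
static void __maybe_unused example_ws_node_id(struct irdma_sc_dev *dev)
{
	u16 id = irdma_alloc_ws_node_id(dev);

	if (id == IRDMA_WS_NODE_INVALID)
		return;
	/* ... use id in a work scheduler node ... */
	irdma_free_ws_node_id(dev, id);
}
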
2316 /**
2317 * irdma_modify_qp_to_err - Modify a QP to error
2318 * @sc_qp: qp structure
2319 */
2320 void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
2321 {
2322 struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
2323 struct ib_qp_attr attr;
2324
2325 if (qp->iwdev->rf->reset)
2326 return;
2327 attr.qp_state = IB_QPS_ERR;
2328
2329 if (rdma_protocol_roce(qp->ibqp.device, 1))
2330 irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
2331 else
2332 irdma_modify_qp(&qp->ibqp, &attr, IB_QP_STATE, NULL);
2333 }
2334
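/**
 * irdma_ib_qp_event - raise an IB event for a QP
 * @iwqp: qp structure
 * @event: irdma event type to translate and report to the event handler
 */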
2335 void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
2336 {
2337 struct ib_event ibevent;
2338
2339 if (!iwqp->ibqp.event_handler)
2340 return;
2341
2342 switch (event) {
2343 case IRDMA_QP_EVENT_CATASTROPHIC:
2344 ibevent.event = IB_EVENT_QP_FATAL;
2345 break;
2346 case IRDMA_QP_EVENT_ACCESS_ERR:
2347 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
2348 break;
2349 case IRDMA_QP_EVENT_REQ_ERR:
2350 ibevent.event = IB_EVENT_QP_REQ_ERR;
2351 break;
2352 }
2353 ibevent.device = iwqp->ibqp.device;
2354 ibevent.element.qp = &iwqp->ibqp;
2355 iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
2356 }
2357
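/**
 * irdma_remove_cmpls_list - free all software-generated completions of a CQ
 * @iwcq: cq whose cmpl_generated list is emptied
 */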
2358 void irdma_remove_cmpls_list(struct irdma_cq *iwcq)
2359 {
2360 struct irdma_cmpl_gen *cmpl_node;
2361 struct list_head *tmp_node, *list_node;
2362
2363 list_for_each_safe(list_node, tmp_node, &iwcq->cmpl_generated) {
2364 cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list);
2365 list_del(&cmpl_node->list);
2366 kfree(cmpl_node);
2367 }
2368 }
2369
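/**
 * irdma_generated_cmpls - dequeue the next software-generated completion
 * @iwcq: cq to poll
 * @cq_poll_info: filled with the dequeued completion info
 *
 * Return: 0 if a completion was returned, -ENOENT if none were queued
 */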
2370 int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info)
2371 {
2372 struct irdma_cmpl_gen *cmpl;
2373
2374 if (list_empty(&iwcq->cmpl_generated))
2375 return -ENOENT;
2376 cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list);
2377 list_del(&cmpl->list);
2378 memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info));
2379 kfree(cmpl);
2380
2381 ibdev_dbg(iwcq->ibcq.device,
2382 "VERBS: %s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%llx\n",
2383 __func__, cq_poll_info->qp_id, cq_poll_info->op_type,
2384 cq_poll_info->wr_id);
2385
2386 return 0;
2387 }
2388
2389 /**
2390 * irdma_set_cpi_common_values - fill in values for polling info struct
2391 * @cpi: resulting structure of cq_poll_info type
2392 * @qp: pointer to the qp structure
2393 * @qp_num: id of the QP
2394 */
2395 static void irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi,
2396 struct irdma_qp_uk *qp, u32 qp_num)
2397 {
2398 cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
2399 cpi->error = true;
2400 cpi->major_err = IRDMA_FLUSH_MAJOR_ERR;
2401 cpi->minor_err = FLUSH_GENERAL_ERR;
2402 cpi->qp_handle = (irdma_qp_handle)(uintptr_t)qp;
2403 cpi->qp_id = qp_num;
2404 }
2405
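/**
 * irdma_comp_handler - invoke the completion handler of an armed CQ
 * @cq: cq to notify
 */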
2406 static inline void irdma_comp_handler(struct irdma_cq *cq)
2407 {
2408 if (!cq->ibcq.comp_handler)
2409 return;
2410 if (atomic_cmpxchg(&cq->armed, 1, 0))
2411 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
2412 }
2413
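/**
 * irdma_generate_flush_completions - generate flushed completions for pending WRs
 * @iwqp: qp whose SQ and RQ work requests are flushed
 *
 * Queues a flushed completion for every pending WR if the corresponding
 * HW CQ is empty; otherwise re-arms the delayed flush worker.
 */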
2414 void irdma_generate_flush_completions(struct irdma_qp *iwqp)
2415 {
2416 struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
2417 struct irdma_ring *sq_ring = &qp->sq_ring;
2418 struct irdma_ring *rq_ring = &qp->rq_ring;
2419 struct irdma_cq *iwscq = iwqp->iwscq;
2420 struct irdma_cq *iwrcq = iwqp->iwrcq;
2421 struct irdma_cmpl_gen *cmpl;
2422 __le64 *sw_wqe;
2423 u64 wqe_qword;
2424 u32 wqe_idx;
2425 bool compl_generated = false;
2426 unsigned long flags1;
2427
2428 spin_lock_irqsave(&iwscq->lock, flags1);
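	/* SW completions may only be queued while the HW CQ is empty so
	 * that real CQEs are not reordered; otherwise retry later from
	 * the delayed flush worker
	 */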
2429 if (irdma_uk_cq_empty(&iwscq->sc_cq.cq_uk)) {
2430 unsigned long flags2;
2431
2432 spin_lock_irqsave(&iwqp->lock, flags2);
2433 while (IRDMA_RING_MORE_WORK(*sq_ring)) {
2434 cmpl = kzalloc_obj(*cmpl, GFP_ATOMIC);
2435 if (!cmpl) {
2436 spin_unlock_irqrestore(&iwqp->lock, flags2);
2437 spin_unlock_irqrestore(&iwscq->lock, flags1);
2438 return;
2439 }
2440
2441 wqe_idx = sq_ring->tail;
2442 irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
2443
2444 cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
2445 sw_wqe = qp->sq_base[wqe_idx].elem;
2446 get_64bit_val(sw_wqe, 24, &wqe_qword);
2447 cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
2448 cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ;
2449 /* remove the SQ WR by moving SQ tail */
2450 IRDMA_RING_SET_TAIL(*sq_ring,
2451 sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
2452 if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) {
2453 kfree(cmpl);
2454 continue;
2455 }
2456 ibdev_dbg(iwscq->ibcq.device,
2457 "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
2458 __func__, cmpl->cpi.wr_id, qp->qp_id);
2459 list_add_tail(&cmpl->list, &iwscq->cmpl_generated);
2460 compl_generated = true;
2461 }
2462 spin_unlock_irqrestore(&iwqp->lock, flags2);
2463 spin_unlock_irqrestore(&iwscq->lock, flags1);
2464 if (compl_generated)
2465 irdma_comp_handler(iwscq);
2466 } else {
2467 spin_unlock_irqrestore(&iwscq->lock, flags1);
2468 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
2469 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
2470 }
2471
2472 spin_lock_irqsave(&iwrcq->lock, flags1);
2473 if (irdma_uk_cq_empty(&iwrcq->sc_cq.cq_uk)) {
2474 unsigned long flags2;
2475
2476 spin_lock_irqsave(&iwqp->lock, flags2);
2477 while (IRDMA_RING_MORE_WORK(*rq_ring)) {
2478 cmpl = kzalloc_obj(*cmpl, GFP_ATOMIC);
2479 if (!cmpl) {
2480 spin_unlock_irqrestore(&iwqp->lock, flags2);
2481 spin_unlock_irqrestore(&iwrcq->lock, flags1);
2482 return;
2483 }
2484
2485 wqe_idx = rq_ring->tail;
2486 irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
2487
2488 cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
2489 cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
2490 cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
2491 /* remove the RQ WR by moving RQ tail */
2492 IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
2493 ibdev_dbg(iwrcq->ibcq.device,
2494 "DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
2495 __func__, cmpl->cpi.wr_id, qp->qp_id,
2496 wqe_idx);
2497 list_add_tail(&cmpl->list, &iwrcq->cmpl_generated);
2498
2499 compl_generated = true;
2500 }
2501 spin_unlock_irqrestore(&iwqp->lock, flags2);
2502 spin_unlock_irqrestore(&iwrcq->lock, flags1);
2503 if (compl_generated)
2504 irdma_comp_handler(iwrcq);
2505 } else {
2506 spin_unlock_irqrestore(&iwrcq->lock, flags1);
2507 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
2508 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
2509 }
2510 }
2511