xref: /linux/drivers/infiniband/hw/hfi1/netdev_rx.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2020 Intel Corporation.
 *
 */

/*
 * This file contains HFI1 support for netdev RX functionality
 */

#include "sdma.h"
#include "verbs.h"
#include "netdev.h"
#include "hfi.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>

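/*
 * Prepare a receive context for netdev use: install the NAPI receive
 * handlers, allocate the RcvHdr queue and eager buffers, and program the
 * RcvCtxtCtrl bits that match the context's capability flags. The context
 * itself is left disabled here and is enabled later from enable_queues().
 */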
static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_rx *rx,
				  struct hfi1_ctxtdata *uctxt)
{
	unsigned int rcvctrl_ops;
	struct hfi1_devdata *dd = rx->dd;
	int ret;

	uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
	uctxt->do_interrupt = &handle_receive_interrupt_napi_sp;

	/* Now allocate the RcvHdr queue and eager buffers. */
	ret = hfi1_create_rcvhdrq(dd, uctxt);
	if (ret)
		goto done;

	ret = hfi1_setup_eagerbufs(uctxt);
	if (ret)
		goto done;

	clear_rcvhdrtail(uctxt);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_DIS;
	rcvctrl_ops |= HFI1_RCVCTRL_INTRAVAIL_DIS;

	if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;

	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
done:
	return ret;
}

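/*
 * Allocate a kernel receive context for netdev RX on the device's NUMA
 * node and select the capability flags and NAPI handlers it will use.
 * Fails with -EIO if the device is frozen.
 */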
static int hfi1_netdev_allocate_ctxt(struct hfi1_devdata *dd,
				     struct hfi1_ctxtdata **ctxt)
{
	struct hfi1_ctxtdata *uctxt;
	int ret;

	if (dd->flags & HFI1_FROZEN)
		return -EIO;

	ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
	if (ret < 0) {
		dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
		return -ENOMEM;
	}

	uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);
	/* Netdev contexts are always NO_RDMA_RTAIL */
	uctxt->fast_handler = handle_receive_interrupt_napi_fp;
	uctxt->slow_handler = handle_receive_interrupt_napi_sp;
	hfi1_set_seq_cnt(uctxt, 1);

	hfi1_stats.sps_ctxts++;

	dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);
	*ctxt = uctxt;

	return 0;
}

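/*
 * Tear down a netdev receive context: disable it in hardware, release its
 * MSI-X vector if one was assigned, clear the TIDs and the pkey, and
 * release the context.
 */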
static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
					struct hfi1_ctxtdata *uctxt)
{
	flush_wc();

	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);

	if (uctxt->msix_intr != CCE_NUM_MSIX_VECTORS)
		msix_free_irq(dd, uctxt->msix_intr);

	uctxt->msix_intr = CCE_NUM_MSIX_VECTORS;
	uctxt->event_flags = 0;

	hfi1_clear_tids(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt);

	hfi1_stats.sps_ctxts--;

	hfi1_free_ctxt(uctxt);
}

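/*
 * Allocate and set up one netdev receive context. If setup fails, the
 * freshly allocated context is torn down again and *ctxt is set to NULL.
 */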
static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_rx *rx,
				  struct hfi1_ctxtdata **ctxt)
{
	int rc;
	struct hfi1_devdata *dd = rx->dd;

	rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
	if (rc) {
		dd_dev_err(dd, "netdev ctxt alloc failed %d\n", rc);
		return rc;
	}

	rc = hfi1_netdev_setup_ctxt(rx, *ctxt);
	if (rc) {
		dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
		hfi1_netdev_deallocate_ctxt(dd, *ctxt);
		*ctxt = NULL;
	}

	return rc;
}

/**
 * hfi1_num_netdev_contexts - Count of netdev recv contexts to use.
 * @dd: device on which to allocate netdev contexts
 * @available_contexts: count of available receive contexts
 * @cpu_mask: mask of possible cpus to include for contexts
 *
 * Return: the number of CPUs from @cpu_mask that are on the device's node,
 * capped at both @available_contexts and HFI1_MAX_NETDEV_CTXTS.
 * A value of 0 is returned when acceleration is explicitly turned off, when
 * a memory allocation error occurs, or when there are no available contexts.
 */
u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
			     struct cpumask *cpu_mask)
{
	cpumask_var_t node_cpu_mask;
	unsigned int available_cpus;

	if (!HFI1_CAP_IS_KSET(AIP))
		return 0;

	/* Always give user contexts priority over netdev contexts */
	if (available_contexts == 0) {
		dd_dev_info(dd, "No receive contexts available for netdevs.\n");
		return 0;
	}

	if (!zalloc_cpumask_var(&node_cpu_mask, GFP_KERNEL)) {
		dd_dev_err(dd, "Unable to allocate cpu_mask for netdevs.\n");
		return 0;
	}

	cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));

	available_cpus = cpumask_weight(node_cpu_mask);

	free_cpumask_var(node_cpu_mask);

	return min3(available_cpus, available_contexts,
		    (u32)HFI1_MAX_NETDEV_CTXTS);
}

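/*
 * Illustrative sizing example (not taken from the code above): with 12 CPUs
 * of @cpu_mask on the device's node, 20 available receive contexts, and
 * assuming HFI1_MAX_NETDEV_CTXTS is 8, hfi1_num_netdev_contexts() returns 8.
 */

/*
 * Allocate per-queue state for every netdev receive context, attach a NAPI
 * instance to each context and request its MSI-X interrupt. Everything is
 * unwound again if any step fails.
 */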
static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx)
{
	int i;
	int rc;
	struct hfi1_devdata *dd = rx->dd;
	struct net_device *dev = rx->rx_napi;

	rx->num_rx_q = dd->num_netdev_contexts;
	rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),
			       GFP_KERNEL, dd->node);

	if (!rx->rxq) {
		dd_dev_err(dd, "Unable to allocate netdev queue data\n");
		return -ENOMEM;
	}

	for (i = 0; i < rx->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd);
		if (rc)
			goto bail_context_irq_failure;

		hfi1_rcd_get(rxq->rcd);
		rxq->rx = rx;
		rxq->rcd->napi = &rxq->napi;
		dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
			    i, rxq->rcd->ctxt);
		/*
		 * Disable BUSY_POLL on this NAPI as this is not supported
		 * right now.
		 */
		set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
		netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi);
		rc = msix_netdev_request_rcd_irq(rxq->rcd);
		if (rc)
			goto bail_context_irq_failure;
	}

	return 0;

bail_context_irq_failure:
	dd_dev_err(dd, "Unable to allot receive context\n");
	for (; i >= 0; i--) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		if (rxq->rcd) {
			hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
			hfi1_rcd_put(rxq->rcd);
			rxq->rcd = NULL;
		}
	}
	kfree(rx->rxq);
	rx->rxq = NULL;

	return rc;
}

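/*
 * Reverse of hfi1_netdev_rxq_init(): delete the NAPI instances, tear down
 * the receive contexts and free the queue array.
 */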
static void hfi1_netdev_rxq_deinit(struct hfi1_netdev_rx *rx)
{
	int i;
	struct hfi1_devdata *dd = rx->dd;

	for (i = 0; i < rx->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		netif_napi_del(&rxq->napi);
		hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
		hfi1_rcd_put(rxq->rcd);
		rxq->rcd = NULL;
	}

	kfree(rx->rxq);
	rx->rxq = NULL;
	rx->num_rx_q = 0;
}

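/* Enable NAPI and the receive context/interrupt for every netdev queue. */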
static void enable_queues(struct hfi1_netdev_rx *rx)
{
	int i;

	for (i = 0; i < rx->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i,
			    rxq->rcd->ctxt);
		napi_enable(&rxq->napi);
		hfi1_rcvctrl(rx->dd,
			     HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
			     rxq->rcd);
	}
}

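/*
 * Quiesce every netdev queue: synchronize the netdev MSI-X interrupts,
 * disable each receive context in hardware, then wait out and disable its
 * NAPI instance. Called with hfi1_mutex held.
 */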
static void disable_queues(struct hfi1_netdev_rx *rx)
{
	int i;

	msix_netdev_synchronize_irq(rx->dd);

	for (i = 0; i < rx->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i,
			    rxq->rcd->ctxt);

		/* wait for napi if it was scheduled */
		hfi1_rcvctrl(rx->dd,
			     HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
			     rxq->rcd);
		napi_synchronize(&rxq->napi);
		napi_disable(&rxq->napi);
	}
}

/**
 * hfi1_netdev_rx_init - Increments the netdevs counter. On the first call,
 * it allocates the receive queue data and calls netif_napi_add
 * for each queue.
 *
 * @dd: hfi1 dev data
 */
int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;
	int res;

	if (atomic_fetch_inc(&rx->netdevs))
		return 0;

	mutex_lock(&hfi1_mutex);
	res = hfi1_netdev_rxq_init(rx);
	mutex_unlock(&hfi1_mutex);
	return res;
}

/**
 * hfi1_netdev_rx_destroy - Decrements the netdevs counter; when it reaches
 * 0, NAPI is deleted and the receive queue memory is freed.
 *
 * @dd: hfi1 dev data
 */
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

	/* destroy the RX queues only if it is the last netdev going away */
	if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) {
		mutex_lock(&hfi1_mutex);
		hfi1_netdev_rxq_deinit(rx);
		mutex_unlock(&hfi1_mutex);
	}

	return 0;
}

/**
 * hfi1_alloc_rx - Allocates the rx support structure
 * @dd: hfi1 dev data
 *
 * Allocate the rx structure to support gathering the receive
 * resources and the dummy netdev.
 *
 * Updates dd struct pointer upon success.
 *
 * Return: 0 on success, negative errno on failure
 */
int hfi1_alloc_rx(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx;

	dd_dev_info(dd, "allocating rx size %zu\n", sizeof(*rx));
	rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node);

	if (!rx)
		return -ENOMEM;
	rx->dd = dd;
	rx->rx_napi = alloc_netdev_dummy(0);
	if (!rx->rx_napi) {
		kfree(rx);
		return -ENOMEM;
	}

	xa_init(&rx->dev_tbl);
	atomic_set(&rx->enabled, 0);
	atomic_set(&rx->netdevs, 0);
	dd->netdev_rx = rx;

	return 0;
}

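/*
 * Free the rx support structure and the dummy netdev allocated by
 * hfi1_alloc_rx().
 */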
void hfi1_free_rx(struct hfi1_devdata *dd)
{
	if (dd->netdev_rx) {
		dd_dev_info(dd, "hfi1 rx freed\n");
		free_netdev(dd->netdev_rx->rx_napi);
		kfree(dd->netdev_rx);
		dd->netdev_rx = NULL;
	}
}

/**
 * hfi1_netdev_enable_queues - NAPI enable function.
 * It enables the NAPI objects associated with the queues.
 * Each call increments an atomic counter; only the first caller actually
 * enables the queues. The disable function decrements the counter and
 * calls napi_disable for every queue once the counter reaches 0.
 *
 * @dd: hfi1 dev data
 */
void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx;

	if (!dd->netdev_rx)
		return;

	rx = dd->netdev_rx;
	if (atomic_fetch_inc(&rx->enabled))
		return;

	mutex_lock(&hfi1_mutex);
	enable_queues(rx);
	mutex_unlock(&hfi1_mutex);
}

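/*
 * Counterpart of hfi1_netdev_enable_queues(): decrements the enabled
 * counter and disables all queues once it reaches zero.
 */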
void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx;

	if (!dd->netdev_rx)
		return;

	rx = dd->netdev_rx;
	if (atomic_dec_if_positive(&rx->enabled))
		return;

	mutex_lock(&hfi1_mutex);
	disable_queues(rx);
	mutex_unlock(&hfi1_mutex);
}

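/*
 * The accessors below maintain rx->dev_tbl, an XArray mapping a small
 * integer id to caller-provided data. A minimal usage sketch (illustrative
 * only; the real callers live in the hfi1 IPoIB code and choose their own
 * ids and data):
 *
 *	hfi1_netdev_add_data(dd, id, priv);
 *	priv = hfi1_netdev_get_data(dd, id);
 *	priv = hfi1_netdev_remove_data(dd, id);
 */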
/**
 * hfi1_netdev_add_data - Registers data with a unique identifier
 * to be requested later. This is needed for IPoIB VLAN
 * implementations.
 * The entry is stored in an XArray, which provides its own internal
 * locking.
 *
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 * @data: data to be associated with index
 */
int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

	return xa_insert(&rx->dev_tbl, id, data, GFP_NOWAIT);
}

/**
 * hfi1_netdev_remove_data - Removes data with previously given id.
 * Returns the pointer to the removed entry, or NULL if no entry was
 * present.
 *
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 */
void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

	return xa_erase(&rx->dev_tbl, id);
}

/**
 * hfi1_netdev_get_data - Gets data with given id
 *
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 */
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

	return xa_load(&rx->dev_tbl, id);
}

/**
 * hfi1_netdev_get_first_data - Gets the first entry with an id greater than
 * or equal to @start_id.
 *
 * @dd: hfi1 dev data
 * @start_id: requested integer id up to INT_MAX; on success, updated with
 *            the id of the entry that was found
 */
void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;
	unsigned long index = *start_id;
	void *ret;

	ret = xa_find(&rx->dev_tbl, &index, UINT_MAX, XA_PRESENT);
	*start_id = (int)index;
	return ret;
}