/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
 * Copyright(c) 2020 Intel Corporation.
 *
 */

/*
 * This file contains HFI1 support for IPOIB functionality
 */

#ifndef HFI1_IPOIB_H
#define HFI1_IPOIB_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/if_infiniband.h>

#include "hfi.h"
#include "iowait.h"
#include "netdev.h"

#include <rdma/ib_verbs.h>

#define HFI1_IPOIB_ENTROPY_SHIFT   24

#define HFI1_IPOIB_TXREQ_NAME_LEN   32

#define HFI1_IPOIB_PSEUDO_LEN 20
#define HFI1_IPOIB_ENCAP_LEN 4

struct hfi1_ipoib_dev_priv;

union hfi1_ipoib_flow {
	u16 as_int;
	struct {
		u8 tx_queue;
		u8 sc5;
	} __attribute__((__packed__));
};

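/*
 * Illustrative sketch (not code from the driver): because tx_queue and
 * sc5 alias a single u16, a flow change on a queue can be detected with
 * one integer compare before deciding whether the pending tx_list needs
 * to be flushed:
 *
 *	union hfi1_ipoib_flow flow = { .tx_queue = q_idx, .sc5 = sc5 };
 *
 *	if (flow.as_int != txq->flow.as_int)
 *		flush_the_queued_descriptors(txq);
 *
 * q_idx, sc5, and flush_the_queued_descriptors() above are placeholders,
 * not symbols defined by this driver.
 */
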
/**
 * struct ipoib_txreq - IPOIB transmit descriptor
 * @txreq: sdma transmit request
 * @sdma_hdr: 9B IB headers
 * @sdma_status: status returned by sdma engine
 * @complete: non-zero implies complete
 * @priv: ipoib netdev private data
 * @txq: txq on which skb was output
 * @skb: skb to send
 */
struct ipoib_txreq {
	struct sdma_txreq           txreq;
	struct hfi1_sdma_header     *sdma_hdr;
	int                         sdma_status;
	int                         complete;
	struct hfi1_ipoib_dev_priv *priv;
	struct hfi1_ipoib_txq      *txq;
	struct sk_buff             *skb;
};

/**
 * struct hfi1_ipoib_circ_buf - List of items to be processed
 * @items: ring of items, each a power-of-two size
 * @max_items: number of ring slots (one more than the max items the ring can hold)
 * @shift: log2 of the per-item size, used to index a txreq in @items
 * @sent_txreqs: count of txreqs posted to sdma
 * @avail: cached count of free ring entries
 * @tail: ring tail
 * @stops: count of stops of queue
 * @ring_full: ring has been filled
 * @no_desc: descriptor shortage seen
 * @complete_txreqs: count of txreqs completed by sdma
 * @head: ring head
 */
struct hfi1_ipoib_circ_buf {
	void *items;
	u32 max_items;
	u32 shift;
	/* consumer cache line */
	u64 ____cacheline_aligned_in_smp sent_txreqs;
	u32 avail;
	u32 tail;
	atomic_t stops;
	atomic_t ring_full;
	atomic_t no_desc;
	/* producer cache line */
	u64 ____cacheline_aligned_in_smp complete_txreqs;
	u32 head;
};

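/*
 * Illustrative sketch, assuming @items holds @max_items fixed-size slots
 * of (1 << @shift) bytes each: a ring index is converted to a txreq
 * pointer with a shift instead of a multiply, and (assuming the ring size
 * is a power of two) indices wrap with a mask:
 *
 *	static inline struct ipoib_txreq *
 *	example_txreq_from_idx(struct hfi1_ipoib_circ_buf *r, u32 idx)
 *	{
 *		return (struct ipoib_txreq *)(r->items + (idx << r->shift));
 *	}
 *
 *	u32 next_tail = (r->tail + 1) & (r->max_items - 1);
 *
 * example_txreq_from_idx() is a sketch for this comment only, not a
 * helper exported by the driver.
 */
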
/**
 * struct hfi1_ipoib_txq - IPOIB per Tx queue information
 * @napi: tx napi interface
 * @priv: private pointer
 * @sde: sdma engine
 * @tx_list: tx request list
 * @flow: tracks when list needs to be flushed for a flow change
 * @q_idx: ipoib Tx queue index
 * @pkts_sent: indicator packets have been sent from this queue
 * @wait: iowait structure
 * @tx_ring: ring of ipoib txreqs to be reaped by napi callback
 */
struct hfi1_ipoib_txq {
	struct napi_struct napi;
	struct hfi1_ipoib_dev_priv *priv;
	struct sdma_engine *sde;
	struct list_head tx_list;
	union hfi1_ipoib_flow flow;
	u8 q_idx;
	bool pkts_sent;
	struct iowait wait;

	struct hfi1_ipoib_circ_buf ____cacheline_aligned_in_smp tx_ring;
};

struct hfi1_ipoib_dev_priv {
	struct hfi1_devdata *dd;
	struct net_device   *netdev;
	struct ib_device    *device;
	struct hfi1_ipoib_txq *txqs;
	const struct net_device_ops *netdev_ops;
	struct rvt_qp *qp;
	u32 qkey;
	u16 pkey;
	u16 pkey_index;
	u8 port_num;
};

/* hfi1 ipoib rdma netdev's private data structure */
struct hfi1_ipoib_rdma_netdev {
	struct rdma_netdev rn;  /* keep this first */
	/* followed by device private data */
	struct hfi1_ipoib_dev_priv dev_priv;
};

static inline struct hfi1_ipoib_dev_priv *
hfi1_ipoib_priv(const struct net_device *dev)
{
	return &((struct hfi1_ipoib_rdma_netdev *)netdev_priv(dev))->dev_priv;
}

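/*
 * Usage note: netdev_priv() on such a device returns the private area laid
 * out as struct hfi1_ipoib_rdma_netdev, with the rdma_netdev first and the
 * hfi1 private data directly behind it, so the accessor above is plain
 * pointer arithmetic on that layout. Illustrative use (netdev stands for
 * any net_device whose private area was allocated with that layout):
 *
 *	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(netdev);
 *	struct hfi1_devdata *dd = priv->dd;
 */
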
int hfi1_ipoib_send(struct net_device *dev,
		    struct sk_buff *skb,
		    struct ib_ah *address,
		    u32 dqpn);

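/*
 * hfi1_ipoib_send() matches the rdma_netdev ->send callback signature
 * (dev, skb, address handle, destination QPN), so a setup path can plug
 * it in directly. Minimal sketch; the surrounding setup code is
 * illustrative, not part of this header:
 *
 *	struct rdma_netdev *rn = netdev_priv(netdev);
 *
 *	rn->send = hfi1_ipoib_send;
 *
 * An offload-capable IPoIB ULP then transmits by invoking rn->send() for
 * each skb it hands to the hardware driver.
 */
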
int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv);
void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv);

int hfi1_ipoib_rxq_init(struct net_device *dev);
void hfi1_ipoib_rxq_deinit(struct net_device *dev);

void hfi1_ipoib_napi_tx_enable(struct net_device *dev);
void hfi1_ipoib_napi_tx_disable(struct net_device *dev);

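/*
 * Expected pairing of the init/teardown entry points above, shown as a
 * hedged setup/teardown sketch (the enclosing function, labels, and error
 * handling are illustrative only):
 *
 *	if (hfi1_ipoib_txreq_init(priv))
 *		goto err;
 *	if (hfi1_ipoib_rxq_init(netdev))
 *		goto err_txreq;
 *
 *	hfi1_ipoib_napi_tx_enable(netdev);   called when the interface opens
 *	hfi1_ipoib_napi_tx_disable(netdev);  called when it stops
 *
 *	hfi1_ipoib_rxq_deinit(netdev);
 *	hfi1_ipoib_txreq_deinit(priv);
 */
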
struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
				       int size, void *data);

int hfi1_ipoib_rn_get_params(struct ib_device *device,
			     u32 port_num,
			     enum rdma_netdev_t type,
			     struct rdma_netdev_alloc_params *params);

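/*
 * hfi1_ipoib_rn_get_params() backs the rdma_netdev_get_params device
 * operation for RDMA_NETDEV_IPOIB: it fills struct rdma_netdev_alloc_params
 * (sizeof_priv, queue counts, and the initialization callback) so the
 * caller can allocate a net_device with room for struct
 * hfi1_ipoib_rdma_netdev. Hedged caller-side sketch (ibdev and port_num
 * are placeholders; a zero return is taken to mean success, per the usual
 * kernel convention):
 *
 *	struct rdma_netdev_alloc_params params;
 *	int rc;
 *
 *	rc = hfi1_ipoib_rn_get_params(ibdev, port_num, RDMA_NETDEV_IPOIB,
 *				      &params);
 *
 * On a zero return the caller allocates the net_device using params
 * (sizeof_priv, txqs, rxqs) and invokes the supplied initialization
 * callback.
 */
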
void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q);

#endif /* HFI1_IPOIB_H */