/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2019 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2025 The FreeBSD Foundation
 * Copyright (c) 2020-2022 Bjoern A. Zeeb
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _LINUXKPI_LINUX_NETDEVICE_H
#define _LINUXKPI_LINUX_NETDEVICE_H

#include <linux/types.h>
#include <linux/netdev_features.h>

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>

#include <net/if_types.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/net.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/random.h>
#include <linux/rcupdate.h>

#ifdef VIMAGE
#define init_net *vnet0
#else
#define init_net *((struct vnet *)0)
#endif

struct sk_buff;
struct net_device;
struct wireless_dev;	/* net/cfg80211.h */

#define MAX_ADDR_LEN 20

#define NET_NAME_UNKNOWN 0

enum net_addr_assign_type {
	NET_ADDR_RANDOM,
};

enum netdev_tx {
	NETDEV_TX_OK = 0,
};
typedef enum netdev_tx netdev_tx_t;

struct netdev_hw_addr {
	struct list_head addr_list;
	uint8_t addr[MAX_ADDR_LEN];
};

struct netdev_hw_addr_list {
	struct list_head addr_list;
	int count;
};

enum net_device_reg_state {
	NETREG_DUMMY = 1,
	NETREG_REGISTERED,
};

enum tc_setup_type {
	TC_SETUP_MAX_DUMMY,
};

struct net_device_ops {
	int (*ndo_open)(struct net_device *);
	int (*ndo_stop)(struct net_device *);
	int (*ndo_set_mac_address)(struct net_device *, void *);
	netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *);
	void (*ndo_set_rx_mode)(struct net_device *);
};
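
/*
 * Example (illustrative sketch only; the "foo_" names are hypothetical and
 * not part of this KPI): a driver typically fills in a subset of these
 * callbacks in a static table and points ndev->netdev_ops at it.
 *
 *	static netdev_tx_t
 *	foo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 *	{
 *		return (NETDEV_TX_OK);
 *	}
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open	= foo_open,
 *		.ndo_stop	= foo_stop,
 *		.ndo_start_xmit	= foo_start_xmit,
 *	};
 *
 *	ndev->netdev_ops = &foo_netdev_ops;
 */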

struct net_device {
	/* net_device fields seen publicly. */
	/* XXX can we later make some aliases to ifnet? */
	char name[IFNAMSIZ];
	struct wireless_dev *ieee80211_ptr;
	uint8_t dev_addr[ETH_ALEN];
	struct netdev_hw_addr_list mc;
	netdev_features_t features;
	struct {
		unsigned long multicast;

		unsigned long rx_bytes;
		unsigned long rx_errors;
		unsigned long rx_packets;
		unsigned long tx_bytes;
		unsigned long tx_dropped;
		unsigned long tx_errors;
		unsigned long tx_packets;
	} stats;
	enum net_addr_assign_type addr_assign_type;
	enum net_device_reg_state reg_state;
	const struct ethtool_ops *ethtool_ops;
	const struct net_device_ops *netdev_ops;

	bool needs_free_netdev;
	/* Not properly typed as of now. */
	int flags, type;
	int name_assign_type, needed_headroom;
	int threaded;

	void (*priv_destructor)(struct net_device *);

	/* net_device internal. */
	struct device dev;

	/*
	 * When the net_device is deleted we need to be able to clear all
	 * NAPI consumers.
	 */
	struct mtx napi_mtx;
	TAILQ_HEAD(, napi_struct) napi_head;
	struct taskqueue *napi_tq;

	/* Must stay last. */
	uint8_t drv_priv[0] __aligned(CACHE_LINE_SIZE);
};

#define SET_NETDEV_DEV(_ndev, _dev) (_ndev)->dev.parent = _dev;
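
/*
 * Example (sketch; "pdev" is a hypothetical LinuxKPI pci_dev pointer): set
 * the parent device before registering the net_device so the device
 * hierarchy is wired up:
 *
 *	SET_NETDEV_DEV(ndev, &pdev->dev);
 */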

enum net_device_path_type {
	DEV_PATH_MTK_WDMA,
};

struct net_device_path {
	enum net_device_path_type type;
	const struct net_device *dev;
	/* We assume there's a struct per type. */
	union {
		struct {
			uint16_t wcid;
			uint8_t wdma_idx;
			uint8_t queue;
			uint8_t bss;
			uint8_t amsdu;
		} mtk_wdma;
	};
};

struct net_device_path_ctx {
	const struct net_device *dev;
};


/* -------------------------------------------------------------------------- */
/* According to linux::ipoib_main.c. */
struct netdev_notifier_info {
	struct net_device *dev;
	struct ifnet *ifp;
};

static inline struct net_device *
netdev_notifier_info_to_dev(struct netdev_notifier_info *ni)
{
	return (ni->dev);
}

static inline struct ifnet *
netdev_notifier_info_to_ifp(struct netdev_notifier_info *ni)
{
	return (ni->ifp);
}

int register_netdevice_notifier(struct notifier_block *);
int register_inetaddr_notifier(struct notifier_block *);
int unregister_netdevice_notifier(struct notifier_block *);
int unregister_inetaddr_notifier(struct notifier_block *);
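
/*
 * Example (illustrative only; "foo_netdev_event" and "foo_nb" are made-up
 * names): hooking net_device events through a notifier_block.
 *
 *	static int
 *	foo_netdev_event(struct notifier_block *nb, unsigned long event,
 *	    void *ptr)
 *	{
 *		struct net_device *ndev;
 *
 *		ndev = netdev_notifier_info_to_dev(ptr);
 *		return (NOTIFY_DONE);
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	error = register_netdevice_notifier(&foo_nb);
 *
 *	and later, on teardown:
 *
 *	error = unregister_netdevice_notifier(&foo_nb);
 */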

/* -------------------------------------------------------------------------- */

#define NAPI_POLL_WEIGHT 64 /* budget */

/*
 * Some drivers test NAPI state bits directly, so we need to expose them
 * publicly.  If you ask me, those accesses should be hidden behind an
 * inline function and the bit flags should not be directly exposed.
 */
enum napi_state_bits {
	/*
	 * Official Linux flags encountered.
	 */
	NAPI_STATE_SCHED = 1,

	/*
	 * Our internal versions (for now).
	 */
	/* Do not schedule new things while we are waiting to clear things. */
	LKPI_NAPI_FLAG_DISABLE_PENDING = 0,
	/* To synchronise that only one poll is ever running. */
	LKPI_NAPI_FLAG_IS_SCHEDULED = 1,
	/* If trying to schedule while poll is running, we need to re-schedule. */
	LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN = 2,
	/* When shutting down, forcefully prevent anything from running the task/poll. */
	LKPI_NAPI_FLAG_SHUTDOWN = 3,
};

struct napi_struct {
	TAILQ_ENTRY(napi_struct) entry;

	struct list_head rx_list;
	struct net_device *dev;
	int (*poll)(struct napi_struct *, int);
	int budget;
	int rx_count;

	/*
	 * These flags mostly need to be checked/changed atomically
	 * (multiple together in some cases).
	 */
	volatile unsigned long state;

	/* FreeBSD internal. */
	/* Use task for now, so we can easily switch between direct and task. */
	struct task napi_task;
};

void linuxkpi_init_dummy_netdev(struct net_device *);
void linuxkpi_netif_napi_add(struct net_device *, struct napi_struct *,
    int(*napi_poll)(struct napi_struct *, int));
void linuxkpi_netif_napi_del(struct napi_struct *);
bool linuxkpi_napi_schedule_prep(struct napi_struct *);
void linuxkpi___napi_schedule(struct napi_struct *);
bool linuxkpi_napi_schedule(struct napi_struct *);
void linuxkpi_napi_reschedule(struct napi_struct *);
bool linuxkpi_napi_complete_done(struct napi_struct *, int);
bool linuxkpi_napi_complete(struct napi_struct *);
void linuxkpi_napi_disable(struct napi_struct *);
void linuxkpi_napi_enable(struct napi_struct *);
void linuxkpi_napi_synchronize(struct napi_struct *);

#define init_dummy_netdev(_n) \
	linuxkpi_init_dummy_netdev(_n)
#define netif_napi_add(_nd, _ns, _p) \
	linuxkpi_netif_napi_add(_nd, _ns, _p)
#define netif_napi_del(_n) \
	linuxkpi_netif_napi_del(_n)
#define napi_schedule_prep(_n) \
	linuxkpi_napi_schedule_prep(_n)
#define __napi_schedule(_n) \
	linuxkpi___napi_schedule(_n)
#define napi_schedule(_n) \
	linuxkpi_napi_schedule(_n)
#define napi_reschedule(_n) \
	linuxkpi_napi_reschedule(_n)
#define napi_complete_done(_n, _r) \
	linuxkpi_napi_complete_done(_n, _r)
#define napi_complete(_n) \
	linuxkpi_napi_complete(_n)
#define napi_disable(_n) \
	linuxkpi_napi_disable(_n)
#define napi_enable(_n) \
	linuxkpi_napi_enable(_n)
#define napi_synchronize(_n) \
	linuxkpi_napi_synchronize(_n)


static inline void
netif_napi_add_tx(struct net_device *dev, struct napi_struct *napi,
    int(*napi_poll)(struct napi_struct *, int))
{

	netif_napi_add(dev, napi, napi_poll);
}

static inline bool
napi_is_scheduled(struct napi_struct *napi)
{

	return (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state));
}
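
/*
 * Example (sketch under assumptions; "foo_poll", "foo_rx_one" and "sc" are
 * hypothetical driver names): the usual pattern is to add and enable the
 * NAPI context at attach time, schedule it from the interrupt handler, and
 * complete it from the poll callback once the budget is not exhausted.
 *
 *	static int
 *	foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *
 *		while (work < budget && foo_rx_one(napi->dev))
 *			work++;
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return (work);
 *	}
 *
 *	At attach:	netif_napi_add(ndev, &sc->napi, foo_poll);
 *			napi_enable(&sc->napi);
 *	From the ISR:	napi_schedule(&sc->napi);
 *	At detach:	napi_disable(&sc->napi);
 *			netif_napi_del(&sc->napi);
 */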

/* -------------------------------------------------------------------------- */

static inline void
netdev_rss_key_fill(uint32_t *buf, size_t len)
{

	/*
	 * Remembering from a previous life, there were discussions on what
	 * makes a good RSS hash key.  See the end of rss_init() in
	 * net/rss_config.c.  iwlwifi is looking for a 10-byte "secret" so
	 * stay with random bytes for now.
	 */
	get_random_bytes(buf, len);
}
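
/*
 * Example (illustrative; "key" is just a local array in a hypothetical
 * driver and its size is not dictated by this KPI):
 *
 *	uint32_t key[10];
 *
 *	netdev_rss_key_fill(key, sizeof(key));
 */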

static inline void
__hw_addr_init(struct netdev_hw_addr_list *list)
{
	list->count = 0;
	INIT_LIST_HEAD(&list->addr_list);
}

static inline int
netdev_hw_addr_list_count(struct netdev_hw_addr_list *list)
{

	return (list->count);
}

static inline int
netdev_mc_count(struct net_device *ndev)
{

	return (netdev_hw_addr_list_count(&ndev->mc));
}

#define netdev_hw_addr_list_for_each(_addr, _list) \
	list_for_each_entry((_addr), &(_list)->addr_list, addr_list)

#define netdev_for_each_mc_addr(na, ndev) \
	netdev_hw_addr_list_for_each(na, &(ndev)->mc)
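
/*
 * Example (sketch; "foo_hw_add_mcast" and "sc" are hypothetical): walking
 * the multicast list, e.g. from an ndo_set_rx_mode callback:
 *
 *	struct netdev_hw_addr *ha;
 *
 *	if (netdev_mc_count(ndev) > 0) {
 *		netdev_for_each_mc_addr(ha, ndev)
 *			foo_hw_add_mcast(sc, ha->addr);
 *	}
 */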

static __inline void
synchronize_net(void)
{

	/* At some point we probably can no longer do this unconditionally. */
	synchronize_rcu();
}

static __inline void
netif_receive_skb_list(struct list_head *head)
{

	pr_debug("%s: TODO\n", __func__);
}

static __inline int
napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{

	pr_debug("%s: TODO\n", __func__);
	return (-1);
}

static __inline void
ether_setup(struct net_device *ndev)
{

	pr_debug("%s: TODO\n", __func__);
}

static __inline void
dev_net_set(struct net_device *ndev, void *p)
{

	pr_debug("%s: TODO\n", __func__);
}

static __inline int
dev_set_threaded(struct net_device *ndev, bool threaded)
{

	pr_debug("%s: TODO\n", __func__);
	return (-ENODEV);
}

/* -------------------------------------------------------------------------- */

static __inline bool
netif_carrier_ok(struct net_device *ndev)
{
	pr_debug("%s: TODO\n", __func__);
	return (false);
}

static __inline void
netif_carrier_off(struct net_device *ndev)
{
	pr_debug("%s: TODO\n", __func__);
}

static __inline void
netif_carrier_on(struct net_device *ndev)
{
	pr_debug("%s: TODO\n", __func__);
}

/* -------------------------------------------------------------------------- */

static __inline bool
netif_queue_stopped(struct net_device *ndev)
{
	pr_debug("%s: TODO\n", __func__);
	return (false);
}

static __inline void
netif_stop_queue(struct net_device *ndev)
{
	pr_debug("%s: TODO\n", __func__);
}

static __inline void
netif_wake_queue(struct net_device *ndev)
{
	pr_debug("%s: TODO\n", __func__);
}

/* -------------------------------------------------------------------------- */

static __inline int
register_netdevice(struct net_device *ndev)
{

	/* assert rtnl_locked? */
	pr_debug("%s: TODO\n", __func__);
	return (0);
}

static __inline int
register_netdev(struct net_device *ndev)
{
	int error;

	/* lock */
	error = register_netdevice(ndev);
	/* unlock */
	pr_debug("%s: TODO\n", __func__);
	return (error);
}

static __inline void
unregister_netdev(struct net_device *ndev)
{
	pr_debug("%s: TODO\n", __func__);
}

static __inline void
unregister_netdevice(struct net_device *ndev)
{
	pr_debug("%s: TODO\n", __func__);
}
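
/*
 * Example (illustrative only; the registration calls above are still stubs
 * in this KPI): the lifecycle a Linux driver expects is to register the
 * net_device once it is fully set up and to unregister and free it again
 * on detach.
 *
 *	At attach:	error = register_netdev(ndev);
 *	At detach:	unregister_netdev(ndev);
 *			free_netdev(ndev);
 */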

/* -------------------------------------------------------------------------- */

static __inline void
netif_rx(struct sk_buff *skb)
{
	pr_debug("%s: TODO\n", __func__);
}

static __inline void
netif_rx_ni(struct sk_buff *skb)
{
	pr_debug("%s: TODO\n", __func__);
}

/* -------------------------------------------------------------------------- */

struct net_device *linuxkpi_alloc_netdev(size_t, const char *, uint32_t,
    void(*)(struct net_device *));
void linuxkpi_free_netdev(struct net_device *);

#define alloc_netdev(_l, _n, _f, _func) \
	linuxkpi_alloc_netdev(_l, _n, _f, _func)
#define alloc_netdev_dummy(_l) \
	linuxkpi_alloc_netdev(_l, "dummy", NET_NAME_UNKNOWN, NULL)
#define free_netdev(_n) \
	linuxkpi_free_netdev(_n)

static inline void *
netdev_priv(const struct net_device *ndev)
{

	return (__DECONST(void *, ndev->drv_priv));
}
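
/*
 * Example (sketch; "struct foo_softc" and "foo_setup" are hypothetical):
 * the private area requested at allocation time lives in drv_priv[] at the
 * tail of struct net_device and is what netdev_priv() hands back.
 *
 *	struct net_device *ndev;
 *	struct foo_softc *sc;
 *
 *	ndev = alloc_netdev(sizeof(*sc), "foo", NET_NAME_UNKNOWN, foo_setup);
 *	if (ndev == NULL)
 *		return (ENOMEM);
 *	sc = netdev_priv(ndev);
 */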

/* -------------------------------------------------------------------------- */

static __inline void
netif_device_attach(struct net_device *ndev)
{
	pr_debug("%s: TODO\n", __func__);
}

static __inline void
netif_device_detach(struct net_device *ndev)
{
	pr_debug("%s: TODO\n", __func__);
}


/* -------------------------------------------------------------------------- */
/* This is really rtnetlink and probably belongs elsewhere. */

#define rtnl_lock() do { } while(0)
#define rtnl_unlock() do { } while(0)
#define rcu_dereference_rtnl(x) READ_ONCE(x)

#endif /* _LINUXKPI_LINUX_NETDEVICE_H */