xref: /linux/drivers/s390/net/qeth_core.h (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
1  /* SPDX-License-Identifier: GPL-2.0 */
2  /*
3   *    Copyright IBM Corp. 2007
4   *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
5   *		 Frank Pavlic <fpavlic@de.ibm.com>,
6   *		 Thomas Spatzier <tspat@de.ibm.com>,
7   *		 Frank Blaschka <frank.blaschka@de.ibm.com>
8   */
9  
10  #ifndef __QETH_CORE_H__
11  #define __QETH_CORE_H__
12  
13  #include <linux/completion.h>
14  #include <linux/debugfs.h>
15  #include <linux/if.h>
16  #include <linux/if_arp.h>
17  #include <linux/etherdevice.h>
18  #include <linux/if_vlan.h>
19  #include <linux/ctype.h>
20  #include <linux/in6.h>
21  #include <linux/bitops.h>
22  #include <linux/seq_file.h>
23  #include <linux/hashtable.h>
24  #include <linux/ip.h>
25  #include <linux/rcupdate.h>
26  #include <linux/refcount.h>
27  #include <linux/timer.h>
28  #include <linux/types.h>
29  #include <linux/wait.h>
30  #include <linux/workqueue.h>
31  
32  #include <net/dst.h>
33  #include <net/ip6_fib.h>
34  #include <net/ipv6.h>
35  #include <net/if_inet6.h>
36  #include <net/addrconf.h>
37  #include <net/route.h>
38  #include <net/sch_generic.h>
39  #include <net/tcp.h>
40  
41  #include <asm/debug.h>
42  #include <asm/qdio.h>
43  #include <asm/ccwdev.h>
44  #include <asm/ccwgroup.h>
45  #include <asm/sysinfo.h>
46  
47  #include <uapi/linux/if_link.h>
48  
49  #include "qeth_core_mpc.h"
50  
/**
 * Debug Facility stuff
 */
enum qeth_dbf_names {
	QETH_DBF_SETUP,
	QETH_DBF_MSG,
	QETH_DBF_CTRL,
	QETH_DBF_INFOS	/* must be last element */
};

/* Describes one s390 debug-feature area used by the driver. */
struct qeth_dbf_info {
	char name[DEBUG_MAX_NAME_LEN];
	int pages;
	int areas;
	int len;		/* size of one debug entry */
	int level;		/* initial debug level */
	struct debug_view *view;
	debug_info_t *id;	/* debug-feature handle */
};

#define QETH_DBF_CTRL_LEN 256U

/* Log into one of the global qeth_dbf[] areas, selected by token paste. */
#define QETH_DBF_TEXT(name, level, text) \
	debug_text_event(qeth_dbf[QETH_DBF_##name].id, level, text)

#define QETH_DBF_HEX(name, level, addr, len) \
	debug_event(qeth_dbf[QETH_DBF_##name].id, level, (void *)(addr), len)

#define QETH_DBF_MESSAGE(level, text...) \
	debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)

#define QETH_DBF_TEXT_(name, level, text...) \
	qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text)

/* Per-card variants that log into card->debug instead of the global areas. */
#define QETH_CARD_TEXT(card, level, text) \
	debug_text_event(card->debug, level, text)

#define QETH_CARD_HEX(card, level, addr, len) \
	debug_event(card->debug, level, (void *)(addr), len)

/*
 * NOTE(review): 'level' is not a parameter of this macro, so it only
 * compiles where a local variable named 'level' happens to be in scope.
 * Looks like a latent bug -- confirm whether this macro is still used.
 */
#define QETH_CARD_MESSAGE(card, text...) \
	debug_sprintf_event(card->debug, level, text)

#define QETH_CARD_TEXT_(card, level, text...) \
	qeth_dbf_longtext(card->debug, level, text)

/* Byte/flag positions inspected in unit-check sense data. */
#define SENSE_COMMAND_REJECT_BYTE 0
#define SENSE_COMMAND_REJECT_FLAG 0x80
#define SENSE_RESETTING_EVENT_BYTE 1
#define SENSE_RESETTING_EVENT_FLAG 0x80
101  
qeth_get_device_id(struct ccw_device * cdev)102  static inline u32 qeth_get_device_id(struct ccw_device *cdev)
103  {
104  	struct ccw_dev_id dev_id;
105  	u32 id;
106  
107  	ccw_device_get_id(cdev, &dev_id);
108  	id = dev_id.devno;
109  	id |= (u32) (dev_id.ssid << 16);
110  
111  	return id;
112  }
113  
/*
 * Common IO related definitions
 */
#define CARD_RDEV(card) card->read.ccwdev	/* read channel device */
#define CARD_WDEV(card) card->write.ccwdev	/* write channel device */
#define CARD_DDEV(card) card->data.ccwdev	/* data channel device */
#define CARD_BUS_ID(card) dev_name(&card->gdev->dev)
#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
/* ssid/devno identifier for log messages: */
#define CCW_DEVID(cdev)		(qeth_get_device_id(cdev))
#define CARD_DEVID(card)	(CCW_DEVID(CARD_RDEV(card)))
126  
/* Routing stuff */
struct qeth_routing_info {
	enum qeth_routing_types type;
};

/* SETBRIDGEPORT stuff */
enum qeth_sbp_roles {
	QETH_SBP_ROLE_NONE	= 0,
	QETH_SBP_ROLE_PRIMARY	= 1,
	QETH_SBP_ROLE_SECONDARY	= 2,
};

enum qeth_sbp_states {
	QETH_SBP_STATE_INACTIVE	= 0,
	QETH_SBP_STATE_STANDBY	= 1,
	QETH_SBP_STATE_ACTIVE	= 2,
};

#define QETH_SBP_HOST_NOTIFICATION 1

/* Bridgeport configuration state. */
struct qeth_sbp_info {
	__u32 supported_funcs;		/* bitmask of supported sub-commands */
	enum qeth_sbp_roles role;
	__u32 hostnotification:1;
	__u32 reflect_promisc:1;
	__u32 reflect_promisc_primary:1;
};

/* VNIC Characteristics state. */
struct qeth_vnicc_info {
	/* supported/currently configured VNICCs; updated in IPA exchanges */
	u32 sup_chars;
	u32 cur_chars;
	/* supported commands: bitmasks which VNICCs support respective cmd */
	u32 set_char_sup;
	u32 getset_timeout_sup;
	/* timeout value for the learning characteristic */
	u32 learning_timeout;
	/* characteristics wanted/configured by user */
	u32 wanted_chars;
	/* has user explicitly enabled rx_bcast while online? */
	bool rx_bcast_enabled;
};

/* IDX function level, per card type: */
#define QETH_IDX_FUNC_LEVEL_OSD		 0x0101
#define QETH_IDX_FUNC_LEVEL_IQD		 0x4108

#define QETH_BUFSIZE		4096
#define CCW_CMD_WRITE		0x01
#define CCW_CMD_READ		0x02

/**
 * some more defs
 */
#define QETH_TX_TIMEOUT		(100 * HZ)
#define QETH_RCD_TIMEOUT	(60 * HZ)
#define QETH_RECLAIM_WORK_TIME	HZ
#define QETH_MAX_PORTNO		15

/*****************************************************************************/
/* QDIO queue and buffer handling                                            */
/*****************************************************************************/
#define QETH_MAX_OUT_QUEUES	4
#define QETH_IQD_MIN_TXQ	2	/* One for ucast, one for mcast. */
#define QETH_IQD_MCAST_TXQ	0
#define QETH_IQD_MIN_UCAST_TXQ	1

#define QETH_MAX_IN_QUEUES	2
#define QETH_RX_COPYBREAK      (PAGE_SIZE >> 1)
#define QETH_IN_BUF_SIZE_DEFAULT 65536
#define QETH_IN_BUF_COUNT_DEFAULT 64
#define QETH_IN_BUF_COUNT_HSDEFAULT 128
#define QETH_IN_BUF_COUNT_MIN	8U
#define QETH_IN_BUF_COUNT_MAX	128U
/* in_buf_size is a multiple of 4K, each element covers one page: */
#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
		 ((card)->qdio.in_buf_pool.buf_count / 2)

/* buffers we have to be behind before we get a PCI */
#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
/*enqueued free buffers left before we get a PCI*/
#define QETH_PCI_THRESHOLD_B(card) 0
/*not used unless the microcode gets patched*/
#define QETH_PCI_TIMER_VALUE(card) 3

/* priority queueing modes */
#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
#define QETH_DEFAULT_QUEUE    2
#define QETH_NO_PRIO_QUEUEING 0
#define QETH_PRIO_Q_ING_PREC  1
#define QETH_PRIO_Q_ING_TOS   2
#define QETH_PRIO_Q_ING_SKB   3
#define QETH_PRIO_Q_ING_VLAN  4
#define QETH_PRIO_Q_ING_FIXED 5

/* Packing */
#define QETH_LOW_WATERMARK_PACK  2
#define QETH_HIGH_WATERMARK_PACK 5
#define QETH_WATERMARK_PACK_FUZZ 1
225  
/* Layer-3 transport header; layout shared with the adapter. */
struct qeth_hdr_layer3 {
	__u8  id;		/* QETH_HEADER_TYPE_* */
	__u8  flags;		/* cast type + QETH_HDR_IPV6/PASSTHRU */
	__u16 inbound_checksum; /*TSO:__u16 seqno */
	__u32 token;		/*TSO: __u32 reserved */
	__u16 length;
	__u8  vlan_prio;
	__u8  ext_flags;	/* QETH_HDR_EXT_* */
	__u16 vlan_id;
	__u16 frame_offset;
	union {
		/* TX: */
		struct in6_addr addr;
		/* RX: */
		struct rx {
			u8 res1[2];
			u8 src_mac[6];
			u8 res2[4];
			u16 vlan_id;
			u8 res3[2];
		} rx;
	} next_hop;
};

/* Layer-2 transport header; layout shared with the adapter. */
struct qeth_hdr_layer2 {
	__u8 id;		/* QETH_HEADER_TYPE_* */
	__u8 flags[3];		/* flags[2] carries QETH_LAYER2_FLAG_* */
	__u8 port_no;
	__u8 hdr_length;
	__u16 pkt_length;
	__u16 seq_no;
	__u16 vlan_id;
	__u32 reserved;
	__u8 reserved2[16];
} __attribute__ ((packed));

struct qeth_hdr {
	union {
		struct qeth_hdr_layer2 l2;
		struct qeth_hdr_layer3 l3;
	} hdr;
} __attribute__ ((packed));

#define QETH_QIB_PQUE_ORDER_RR		0
#define QETH_QIB_PQUE_UNITS_SBAL	2
#define QETH_QIB_PQUE_PRIO_DEFAULT	4

/* Queue-Information-Block parameters handed to the QDIO layer. */
struct qeth_qib_parms {
	char pcit_magic[4];
	u32 pcit_a;
	u32 pcit_b;
	u32 pcit_c;
	char blkt_magic[4];
	u32 blkt_total;
	u32 blkt_inter_packet;
	u32 blkt_inter_packet_jumbo;
	char pque_magic[4];
	u8 pque_order;
	u8 pque_units;
	u16 reserved;
	u32 pque_priority[4];
};

/*TCP Segmentation Offload header*/
struct qeth_hdr_ext_tso {
	__u16 hdr_tot_len;
	__u8  imb_hdr_no;
	__u8  reserved;
	__u8  hdr_type;
	__u8  hdr_version;
	__u16 hdr_len;
	__u32 payload_len;
	__u16 mss;
	__u16 dg_hdr_len;
	__u8  padding[16];
} __attribute__ ((packed));

struct qeth_hdr_tso {
	struct qeth_hdr hdr;	/*hdr->hdr.l3.xxx*/
	struct qeth_hdr_ext_tso ext;
} __attribute__ ((packed));


/* flags for qeth_hdr.flags */
#define QETH_HDR_PASSTHRU 0x10
#define QETH_HDR_IPV6     0x80
#define QETH_HDR_CAST_MASK 0x07
enum qeth_cast_flags {
	QETH_CAST_UNICAST   = 0x06,
	QETH_CAST_MULTICAST = 0x04,
	QETH_CAST_BROADCAST = 0x05,
	QETH_CAST_ANYCAST   = 0x07,
	QETH_CAST_NOCAST    = 0x00,
};

/* flag bits in qeth_hdr_layer2.flags[2] */
enum qeth_layer2_frame_flags {
	QETH_LAYER2_FLAG_MULTICAST = 0x01,
	QETH_LAYER2_FLAG_BROADCAST = 0x02,
	QETH_LAYER2_FLAG_UNICAST   = 0x04,
	QETH_LAYER2_FLAG_VLAN      = 0x10,
};

enum qeth_header_ids {
	QETH_HEADER_TYPE_LAYER3 = 0x01,
	QETH_HEADER_TYPE_LAYER2 = 0x02,
	QETH_HEADER_TYPE_L3_TSO	= 0x03,
	QETH_HEADER_TYPE_L2_TSO	= 0x06,
	QETH_HEADER_MASK_INVAL	= 0x80,
};
/* flags for qeth_hdr.ext_flags */
#define QETH_HDR_EXT_VLAN_FRAME       0x01
#define QETH_HDR_EXT_TOKEN_ID         0x02
#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04
#define QETH_HDR_EXT_SRC_MAC_ADDR     0x08
#define QETH_HDR_EXT_CSUM_HDR_REQ     0x10
#define QETH_HDR_EXT_CSUM_TRANSP_REQ  0x20
#define QETH_HDR_EXT_UDP	      0x40 /*bit off for TCP*/
343  
qeth_l2_same_vlan(struct qeth_hdr_layer2 * h1,struct qeth_hdr_layer2 * h2)344  static inline bool qeth_l2_same_vlan(struct qeth_hdr_layer2 *h1,
345  				     struct qeth_hdr_layer2 *h2)
346  {
347  	return !((h1->flags[2] ^ h2->flags[2]) & QETH_LAYER2_FLAG_VLAN) &&
348  	       h1->vlan_id == h2->vlan_id;
349  }
350  
qeth_l3_iqd_same_vlan(struct qeth_hdr_layer3 * h1,struct qeth_hdr_layer3 * h2)351  static inline bool qeth_l3_iqd_same_vlan(struct qeth_hdr_layer3 *h1,
352  					 struct qeth_hdr_layer3 *h2)
353  {
354  	return !((h1->ext_flags ^ h2->ext_flags) & QETH_HDR_EXT_VLAN_FRAME) &&
355  	       h1->vlan_id == h2->vlan_id;
356  }
357  
qeth_l3_same_next_hop(struct qeth_hdr_layer3 * h1,struct qeth_hdr_layer3 * h2)358  static inline bool qeth_l3_same_next_hop(struct qeth_hdr_layer3 *h1,
359  					 struct qeth_hdr_layer3 *h2)
360  {
361  	return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) &&
362  	       ipv6_addr_equal(&h1->next_hop.addr, &h2->next_hop.addr);
363  }
364  
/* RCU-managed entry in the local_addrs4/local_addrs6 hash tables. */
struct qeth_local_addr {
	struct hlist_node hnode;
	struct rcu_head rcu;
	struct in6_addr addr;
};

/* Lifecycle of the QDIO subsystem, tracked in qeth_qdio_info.state. */
enum qeth_qdio_info_states {
	QETH_QDIO_UNINITIALIZED,
	QETH_QDIO_ALLOCATED,
	QETH_QDIO_ESTABLISHED,
	QETH_QDIO_CLEANING
};

/* One set of pages backing an inbound buffer. */
struct qeth_buffer_pool_entry {
	struct list_head list;
	struct list_head init_list;
	struct page *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
};

struct qeth_qdio_buffer_pool {
	struct list_head entry_list;
	int buf_count;
};

/* One inbound QDIO buffer slot. */
struct qeth_qdio_buffer {
	struct qdio_buffer *buffer;
	/* the buffer pool entry currently associated to this buffer */
	struct qeth_buffer_pool_entry *pool_entry;
	struct sk_buff *rx_skb;
};

/* An inbound QDIO queue (used for both in_q and c_q). */
struct qeth_qdio_q {
	struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
	struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
	int next_buf_to_init;
};

enum qeth_qdio_out_buffer_state {
	/* Owned by driver, in order to be filled. */
	QETH_QDIO_BUF_EMPTY,
	/* Filled by driver; owned by hardware in order to be sent. */
	QETH_QDIO_BUF_PRIMED,
};

/* Progress of an async TX completion, kept in qeth_qaob_priv1.state. */
enum qeth_qaob_state {
	QETH_QAOB_ISSUED,
	QETH_QAOB_PENDING,
	QETH_QAOB_DONE,
};

/* Driver-private data stashed inside a qaob. */
struct qeth_qaob_priv1 {
	unsigned int state;
	u8 queue_no;
};

/* One outbound QDIO buffer slot and its in-flight bookkeeping. */
struct qeth_qdio_out_buffer {
	struct qdio_buffer *buffer;
	atomic_t state;		/* enum qeth_qdio_out_buffer_state */
	int next_element_to_fill;
	unsigned int frames;
	unsigned int bytes;
	struct sk_buff_head skb_list;
	DECLARE_BITMAP(from_kmem_cache, QDIO_MAX_ELEMENTS_PER_BUFFER);

	struct list_head list_entry;
	struct qaob *aob;
};

struct qeth_card;

/* Plain (non-atomic) stat accumulation helpers. */
#define QETH_CARD_STAT_ADD(_c, _stat, _val)	((_c)->stats._stat += (_val))
#define QETH_CARD_STAT_INC(_c, _stat)		QETH_CARD_STAT_ADD(_c, _stat, 1)

#define QETH_TXQ_STAT_ADD(_q, _stat, _val)	((_q)->stats._stat += (_val))
#define QETH_TXQ_STAT_INC(_q, _stat)		QETH_TXQ_STAT_ADD(_q, _stat, 1)
440  
/* Per-card RX and error counters. */
struct qeth_card_stats {
	u64 rx_bufs;
	u64 rx_skb_csum;
	u64 rx_sg_skbs;
	u64 rx_sg_frags;
	u64 rx_sg_alloc_page;

	u64 rx_dropped_nomem;
	u64 rx_dropped_notsupp;
	u64 rx_dropped_runt;

	/* rtnl_link_stats64 */
	u64 rx_packets;
	u64 rx_bytes;
	u64 rx_multicast;
	u64 rx_length_errors;
	u64 rx_frame_errors;
	u64 rx_fifo_errors;
};

/* Per-TX-queue counters. */
struct qeth_out_q_stats {
	u64 bufs;
	u64 bufs_pack;
	u64 buf_elements;
	u64 skbs_pack;
	u64 skbs_sg;
	u64 skbs_csum;
	u64 skbs_tso;
	u64 skbs_linearized;
	u64 skbs_linearized_fail;
	u64 tso_bytes;
	u64 packing_mode_switch;
	u64 stopped;
	u64 doorbell;
	u64 coal_frames;
	u64 completion_irq;
	u64 completion_yield;
	u64 completion_timer;

	/* rtnl_link_stats64 */
	u64 tx_packets;
	u64 tx_bytes;
	u64 tx_errors;
	u64 tx_dropped;
};

/* TX coalescing defaults: */
#define QETH_TX_MAX_COALESCED_FRAMES	1
#define QETH_TX_COALESCE_USECS		25
#define QETH_TX_TIMER_USECS		500
490  
/* An outbound QDIO queue and its TX state. */
struct qeth_qdio_out_q {
	struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
	struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
	/* buffers waiting for async (QAOB) completion: */
	struct list_head pending_bufs;
	struct qeth_out_q_stats stats;
	spinlock_t lock;
	unsigned int priority;
	u8 next_buf_to_fill;
	u8 max_elements;
	u8 queue_no;
	u8 do_pack;		/* packing mode currently active? */
	struct qeth_card *card;
	/*
	 * number of buffers that are currently filled (PRIMED)
	 * -> these buffers are hardware-owned
	 */
	atomic_t used_buffers;
	/* indicates whether PCI flag must be set (or if one is outstanding) */
	atomic_t set_pci_flags_count;
	struct napi_struct napi;
	struct timer_list timer;
	struct qeth_hdr *prev_hdr;
	unsigned int coalesced_frames;
	/* bulk submission window into bufs[]: */
	u8 bulk_start;
	u8 bulk_count;
	u8 bulk_max;

	unsigned int coalesce_usecs;
	unsigned int max_coalesced_frames;
	unsigned int rescan_usecs;
};

#define qeth_for_each_output_queue(card, q, i)		\
	for (i = 0; i < card->qdio.no_out_queues &&	\
		    (q = card->qdio.out_qs[i]); i++)

#define	qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi)
528  
/* Arm (or shorten) the queue's TX timer to fire in @usecs from now. */
static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue,
				     unsigned long usecs)
{
	timer_reduce(&queue->timer, usecs_to_jiffies(usecs) + jiffies);
}

/* All buffer slots are currently owned by HW, nothing left to fill. */
static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
{
	return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q;
}

/* No buffer is currently owned by HW. */
static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue)
{
	return atomic_read(&queue->used_buffers) == 0;
}
544  
/* Aggregated QDIO state of one card. */
struct qeth_qdio_info {
	atomic_t state;		/* enum qeth_qdio_info_states */
	/* input */
	struct qeth_qdio_q *in_q;
	struct qeth_qdio_q *c_q;	/* completion queue */
	struct qeth_qdio_buffer_pool in_buf_pool;
	struct qeth_qdio_buffer_pool init_pool;
	int in_buf_size;

	/* output */
	unsigned int no_out_queues;
	struct qeth_qdio_out_q *out_qs[QETH_MAX_OUT_QUEUES];

	/* priority queueing */
	int do_prio_queueing;
	int default_out_queue;
};

/**
 *  channel state machine
 */
enum qeth_channel_states {
	CH_STATE_UP,
	CH_STATE_DOWN,
	CH_STATE_HALTED,
	CH_STATE_STOPPED,
};
/**
 * card state machine
 */
enum qeth_card_states {
	CARD_STATE_DOWN,
	CARD_STATE_SOFTSETUP,
};

/**
 * Protocol versions
 */
enum qeth_prot_versions {
	QETH_PROT_NONE = 0x0000,
	QETH_PROT_IPV4 = 0x0004,
	QETH_PROT_IPV6 = 0x0006,
};

/* completion queue support, see qeth_card_options.cq */
enum qeth_cq {
	QETH_CQ_DISABLED = 0,
	QETH_CQ_ENABLED = 1,
	QETH_CQ_NOTAVAILABLE = 2,
};

/* IP-address takeover configuration; entries protected by card->ip_lock. */
struct qeth_ipato {
	bool enabled;
	bool invert4;
	bool invert6;
	struct list_head entries;
};

/* One of the card's CCW channels (read/write/data). */
struct qeth_channel {
	struct ccw_device *ccwdev;
	/* the cmd currently owning the channel, NULL when idle: */
	struct qeth_cmd_buffer *active_cmd;
	enum qeth_channel_states state;
};

/* Reply handling for a pending control command. */
struct qeth_reply {
	int (*callback)(struct qeth_card *card, struct qeth_reply *reply,
			unsigned long data);
	void *param;
};

/* A control command buffer and its completion machinery. */
struct qeth_cmd_buffer {
	struct list_head list_entry;
	struct completion done;
	spinlock_t lock;
	unsigned int length;
	refcount_t ref_count;
	struct qeth_channel *channel;
	struct qeth_reply reply;
	long timeout;
	unsigned char *data;
	/* optional per-command hooks: */
	void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob);
	bool (*match)(struct qeth_cmd_buffer *iob,
		      struct qeth_cmd_buffer *reply);
	void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob,
			 unsigned int data_length);
	int rc;
};
631  
/* Take an additional reference on @iob. */
static inline void qeth_get_cmd(struct qeth_cmd_buffer *iob)
{
	refcount_inc(&iob->ref_count);
}

/* Return the IPA payload of @iob, or NULL if it is not an IPA PDU. */
static inline struct qeth_ipa_cmd *__ipa_reply(struct qeth_cmd_buffer *iob)
{
	if (!IS_IPA(iob->data))
		return NULL;

	return (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
}

/* Return the IPA command area behind the PDU header (no validation). */
static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
{
	return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
}

/* The channel program lives 8-byte-aligned behind the cmd data. */
static inline struct ccw1 *__ccw_from_cmd(struct qeth_cmd_buffer *iob)
{
	return (struct ccw1 *)(iob->data + ALIGN(iob->length, 8));
}
654  
/**
 *  OSA card related definitions
 */
/* Connection tokens exchanged during CM/ULP setup. */
struct qeth_token {
	__u32 issuer_rm_w;
	__u32 issuer_rm_r;
	__u32 cm_filter_w;
	__u32 cm_filter_r;
	__u32 cm_connection_w;
	__u32 cm_connection_r;
	__u32 ulp_filter_w;
	__u32 ulp_filter_r;
	__u32 ulp_connection_w;
	__u32 ulp_connection_r;
};

/* Sequence numbers for the control-channel protocol. */
struct qeth_seqno {
	__u32 trans_hdr;
	__u32 pdu_hdr;
	__u32 pdu_hdr_ack;
	__u16 ipa;
};

/* BLKT (blocking threshold) values, see also qeth_qib_parms. */
struct qeth_card_blkt {
	int time_total;
	int inter_packet;
	int inter_packet_jumbo;
};

/* Current user of the PNSO (network-subchannel operation) facility. */
enum qeth_pnso_mode {
	QETH_PNSO_NONE,
	QETH_PNSO_BRIDGEPORT,
	QETH_PNSO_ADDR_INFO,
};

enum qeth_link_mode {
	QETH_LINK_MODE_UNKNOWN,
	QETH_LINK_MODE_FIBRE_SHORT,
	QETH_LINK_MODE_FIBRE_LONG,
};

/* Cached link parameters. */
struct qeth_link_info {
	u32 speed;
	u8 duplex;
	u8 port;
	enum qeth_link_mode link_mode;
};
702  
#define QETH_BROADCAST_WITH_ECHO    0x01
#define QETH_BROADCAST_WITHOUT_ECHO 0x02
/* Identity and capability info for one device. */
struct qeth_card_info {
	unsigned short unit_addr2;
	unsigned short cula;
	__u16 func_level;
	char mcl_level[QETH_MCL_LENGTH + 1];
	/* doubleword below corresponds to net_if_token */
	u16 ddev_devno;
	u8 cssid;
	u8 iid;
	u8 ssid;
	u8 chpid;
	u16 chid;
	u8 ids_valid:1; /* cssid,iid,chid */
	u8 dev_addr_is_registered:1;
	u8 promisc_mode:1;
	u8 use_v1_blkt:1;
	u8 is_vm_nic:1;
	/* no bitfield, we take a pointer on these two: */
	u8 has_lp2lp_cso_v6;
	u8 has_lp2lp_cso_v4;
	enum qeth_pnso_mode pnso_mode;
	enum qeth_card_types type;
	enum qeth_link_types link_type;
	int broadcast_capable;
	bool layer_enforced;
	struct qeth_card_blkt blkt;
	__u32 diagass_support;
	__u32 hwtrap;
	struct qeth_link_info link_info;
};

enum qeth_discipline_id {
	QETH_DISCIPLINE_UNDETERMINED = -1,
	QETH_DISCIPLINE_LAYER3 = 0,
	QETH_DISCIPLINE_LAYER2 = 1,
};

/* User-configurable options. */
struct qeth_card_options {
	struct qeth_ipa_caps ipa4;
	struct qeth_ipa_caps ipa6;
	struct qeth_routing_info route4;
	struct qeth_routing_info route6;
	struct qeth_ipa_caps adp; /* Adapter parameters */
	struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
	struct qeth_vnicc_info vnicc; /* VNICC options */
	enum qeth_discipline_id layer;
	enum qeth_ipa_isolation_modes isolation;
	int sniffer;
	enum qeth_cq cq;
	char hsuid[9];
};

#define	IS_LAYER2(card)	((card)->options.layer == QETH_DISCIPLINE_LAYER2)
#define	IS_LAYER3(card)	((card)->options.layer == QETH_DISCIPLINE_LAYER3)

/*
 * thread bits for qeth_card thread masks
 */
enum qeth_threads {
	QETH_RECOVER_THREAD = 1,
};

/* Operations implemented by the L2 and L3 disciplines. */
struct qeth_discipline {
	int (*setup) (struct ccwgroup_device *);
	void (*remove) (struct ccwgroup_device *);
	int (*set_online)(struct qeth_card *card, bool carrier_ok);
	void (*set_offline)(struct qeth_card *card);
	int (*control_event_handler)(struct qeth_card *card,
					struct qeth_ipa_cmd *cmd);
};

enum qeth_addr_disposition {
	QETH_DISP_ADDR_DELETE = 0,
	QETH_DISP_ADDR_DO_NOTHING = 1,
	QETH_DISP_ADDR_ADD = 2,
};

/* RX path bookkeeping (see qeth_card.rx). */
struct qeth_rx {
	int b_count;
	int b_index;
	u8 buf_element;
	int e_offset;
	int qdio_err;
	u8 bufs_refill;
};

struct qeth_switch_info {
	__u32 capabilities;
	__u32 settings;
};

/* Driver-private data attached to the net_device (netdev_priv()). */
struct qeth_priv {
	unsigned int rx_copybreak;
	unsigned int tx_wanted_queues;
	u32 brport_hw_features;
	u32 brport_features;
};
802  
/* Central per-device state of the qeth driver. */
struct qeth_card {
	enum qeth_card_states state;
	spinlock_t lock;
	struct ccwgroup_device *gdev;
	struct qeth_cmd_buffer *read_cmd;
	struct qeth_channel read;
	struct qeth_channel write;
	struct qeth_channel data;

	struct net_device *dev;
	struct dentry *debugfs;
	struct qeth_card_stats stats;
	struct qeth_card_info info;
	struct qeth_token token;
	struct qeth_seqno seqno;
	struct qeth_card_options options;

	struct workqueue_struct *event_wq;
	struct workqueue_struct *cmd_wq;
	wait_queue_head_t wait_q;

	struct mutex ip_lock;
	/* protected by ip_lock: */
	DECLARE_HASHTABLE(ip_htable, 4);
	struct qeth_ipato ipato;

	DECLARE_HASHTABLE(local_addrs4, 4);
	DECLARE_HASHTABLE(local_addrs6, 4);
	spinlock_t local_addrs4_lock;
	spinlock_t local_addrs6_lock;
	DECLARE_HASHTABLE(rx_mode_addrs, 4);
	struct work_struct rx_mode_work;
	struct work_struct kernel_thread_starter;
	spinlock_t thread_mask_lock;
	/* enum qeth_threads bits; presumably guarded by thread_mask_lock: */
	unsigned long thread_start_mask;
	unsigned long thread_allowed_mask;
	unsigned long thread_running_mask;
	struct list_head cmd_waiter_list;
	/* QDIO buffer handling */
	struct qeth_qdio_info qdio;
	int read_or_write_problem;
	const struct qeth_discipline *discipline;
	atomic_t force_alloc_skb;
	struct service_level qeth_service_level;
	struct qdio_ssqd_desc ssqd;
	debug_info_t *debug;	/* per-card s390 debug area */
	struct mutex sbp_lock;
	struct mutex conf_mutex;
	struct mutex discipline_mutex;
	struct napi_struct napi;
	struct qeth_rx rx;
	struct delayed_work buffer_reclaim_work;
};
856  
/* Card has completed soft setup and can talk to the HW. */
static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
{
	return card->state == CARD_STATE_SOFTSETUP;
}

/* TX completion IRQs are used on all non-IQD devices. */
static inline bool qeth_use_tx_irqs(struct qeth_card *card)
{
	return !IS_IQD(card);
}

/* Release the channel and wake anyone waiting to issue a cmd on it. */
static inline void qeth_unlock_channel(struct qeth_card *card,
				       struct qeth_channel *channel)
{
	xchg(&channel->active_cmd, NULL);
	wake_up(&card->wait_q);
}

/* Atomically claim the channel for @cmd; fails if another cmd is active. */
static inline bool qeth_trylock_channel(struct qeth_channel *channel,
					struct qeth_cmd_buffer *cmd)
{
	return cmpxchg(&channel->active_cmd, NULL, cmd) == NULL;
}

/* Identification data reported for HW traps. */
struct qeth_trap_id {
	__u16 lparnr;
	char vmname[8];
	__u8 chpid;
	__u8 ssid;
	__u16 devno;
} __packed;
887  
/* True when TX queue selection is driven by packet priority. */
static inline bool qeth_uses_tx_prio_queueing(struct qeth_card *card)
{
	return card->qdio.do_prio_queueing != QETH_NO_PRIO_QUEUEING;
}

/* Number of TX queues the netdev should currently expose. */
static inline unsigned int qeth_tx_actual_queues(struct qeth_card *card)
{
	struct qeth_priv *priv = netdev_priv(card->dev);

	if (qeth_uses_tx_prio_queueing(card))
		return min(card->dev->num_tx_queues, card->qdio.no_out_queues);

	/* otherwise honor the user's requested queue count */
	return min(priv->tx_wanted_queues, card->qdio.no_out_queues);
}
902  
/*
 * Map between stack TX queue numbers and HW queue numbers: the mcast
 * queue (stack position QETH_IQD_MCAST_TXQ) trades places with the last
 * queue; all other queue numbers pass through unchanged.
 */
static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
{
	u16 last_txq = dev->num_tx_queues - 1;

	if (txq == QETH_IQD_MCAST_TXQ)
		return last_txq;
	if (txq == last_txq)
		return QETH_IQD_MCAST_TXQ;
	return txq;
}
911  
/* True if @queue is the dedicated IQD multicast TX queue. */
static inline bool qeth_iqd_is_mcast_queue(struct qeth_card *card,
					   struct qeth_qdio_out_q *queue)
{
	return qeth_iqd_translate_txq(card->dev, queue->queue_no) ==
	       QETH_IQD_MCAST_TXQ;
}
918  
qeth_scrub_qdio_buffer(struct qdio_buffer * buf,unsigned int elements)919  static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
920  					  unsigned int elements)
921  {
922  	unsigned int i;
923  
924  	for (i = 0; i < elements; i++)
925  		memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
926  	buf->element[14].sflags = 0;
927  	buf->element[15].sflags = 0;
928  }
929  
/**
 * qeth_get_elements_for_range() -	find number of SBALEs to cover range.
 * @start:				Start of the address range.
 * @end:				Address after the end of the range.
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * the specified address range.
 */
static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
{
	/* round @end up and @start down to page frames, then count frames */
	return PFN_UP(end) - PFN_DOWN(start);
}
942  
qeth_get_ether_cast_type(struct sk_buff * skb)943  static inline int qeth_get_ether_cast_type(struct sk_buff *skb)
944  {
945  	u8 *addr = eth_hdr(skb)->h_dest;
946  
947  	if (is_multicast_ether_addr(addr))
948  		return is_broadcast_ether_addr(addr) ? RTN_BROADCAST :
949  						       RTN_MULTICAST;
950  	return RTN_UNICAST;
951  }
952  
/* Validate the skb's cached dst entry; returns NULL if absent or stale. */
static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb,
						   __be16 proto)
{
	struct dst_entry *dst = skb_dst(skb);

	if (!dst)
		return NULL;

	if (proto == htons(ETH_P_IPV6))
		return dst_check(dst, rt6_get_cookie(dst_rt6_info(dst)));

	return dst_check(dst, 0);
}
969  
qeth_next_hop_v4_rcu(struct sk_buff * skb,struct dst_entry * dst)970  static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb,
971  					  struct dst_entry *dst)
972  {
973  	return (dst) ? rt_nexthop(dst_rtable(dst), ip_hdr(skb)->daddr) :
974  		       ip_hdr(skb)->daddr;
975  }
976  
qeth_next_hop_v6_rcu(struct sk_buff * skb,struct dst_entry * dst)977  static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb,
978  						    struct dst_entry *dst)
979  {
980  	struct rt6_info *rt = dst_rt6_info(dst);
981  
982  	if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
983  		return &rt->rt6i_gateway;
984  	else
985  		return &ipv6_hdr(skb)->daddr;
986  }
987  
/* Request transport-layer checksum offload, marking UDP payloads. */
static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, __be16 proto)
{
	bool is_udp = false;

	if (proto == htons(ETH_P_IP))
		is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
	else if (proto == htons(ETH_P_IPV6))
		is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;

	*flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
	if (is_udp)
		*flags |= QETH_HDR_EXT_UDP;
}
995  
/* Return @entry to the card's RX buffer pool. */
static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
		struct qeth_buffer_pool_entry *entry)
{
	list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
}

/* Non-zero if the adapter advertised support for diag sub-command @cmd. */
static inline int qeth_is_diagass_supported(struct qeth_card *card,
		enum qeth_diags_cmds cmd)
{
	return card->info.diagass_support & (__u32)cmd;
}
1007  
1008  int qeth_send_simple_setassparms_prot(struct qeth_card *card,
1009  				      enum qeth_ipa_funcs ipa_func,
1010  				      u16 cmd_code, u32 *data,
1011  				      enum qeth_prot_versions prot);
/* IPv4 variant of qeth_send_simple_setassparms_prot(). */
static inline int qeth_send_simple_setassparms(struct qeth_card *card,
					       enum qeth_ipa_funcs ipa_func,
					       u16 cmd_code, u32 *data)
{
	return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
						 data, QETH_PROT_IPV4);
}

/* IPv6 variant of qeth_send_simple_setassparms_prot(). */
static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
						  enum qeth_ipa_funcs ipa_func,
						  u16 cmd_code, u32 *data)
{
	return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
						 data, QETH_PROT_IPV6);
}
1028  
1029  extern const struct qeth_discipline qeth_l2_discipline;
1030  extern const struct qeth_discipline qeth_l3_discipline;
1031  extern const struct ethtool_ops qeth_ethtool_ops;
1032  extern const struct attribute_group *qeth_dev_groups[];
1033  
1034  const char *qeth_get_cardname_short(struct qeth_card *);
1035  int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count);
1036  int qeth_setup_discipline(struct qeth_card *card, enum qeth_discipline_id disc);
1037  void qeth_remove_discipline(struct qeth_card *card);
1038  
1039  /* exports for qeth discipline device drivers */
1040  extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
1041  
1042  struct net_device *qeth_clone_netdev(struct net_device *orig);
1043  void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
1044  			      int clear_start_mask);
1045  int qeth_threads_running(struct qeth_card *, unsigned long);
1046  int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
1047  		     bool resetting);
1048  
1049  int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
1050  		  int (*reply_cb)
1051  		  (struct qeth_card *, struct qeth_reply *, unsigned long),
1052  		  void *);
1053  struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
1054  					   enum qeth_ipa_cmds cmd_code,
1055  					   enum qeth_prot_versions prot,
1056  					   unsigned int data_length);
1057  struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
1058  						 enum qeth_ipa_funcs ipa_func,
1059  						 u16 cmd_code,
1060  						 unsigned int data_length,
1061  						 enum qeth_prot_versions prot);
1062  struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
1063  					  enum qeth_diags_cmds sub_cmd,
1064  					  unsigned int data_length);
1065  
1066  int qeth_schedule_recovery(struct qeth_card *card);
1067  int qeth_poll(struct napi_struct *napi, int budget);
1068  void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable);
1069  int qeth_setadpparms_change_macaddr(struct qeth_card *);
1070  void qeth_tx_timeout(struct net_device *, unsigned int txqueue);
1071  int qeth_query_switch_attributes(struct qeth_card *card,
1072  				  struct qeth_switch_info *sw_info);
1073  int qeth_query_card_info(struct qeth_card *card,
1074  			 struct qeth_link_info *link_info);
1075  int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
1076  				     enum qeth_ipa_isolation_modes mode);
1077  
1078  int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
1079  int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq,
1080  			void __user *data, int cmd);
1081  __printf(3, 4)
1082  void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
1083  int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
1084  int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
1085  int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
1086  int qeth_set_features(struct net_device *, netdev_features_t);
1087  void qeth_enable_hw_features(struct net_device *dev);
1088  netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
1089  netdev_features_t qeth_features_check(struct sk_buff *skb,
1090  				      struct net_device *dev,
1091  				      netdev_features_t features);
1092  void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
1093  int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count);
1094  u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
1095  			  u8 cast_type, struct net_device *sb_dev);
1096  u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
1097  			  struct net_device *sb_dev);
1098  int qeth_open(struct net_device *dev);
1099  int qeth_stop(struct net_device *dev);
1100  
1101  int qeth_vm_request_mac(struct qeth_card *card);
1102  int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
1103  	      struct qeth_qdio_out_q *queue, __be16 proto,
1104  	      void (*fill_header)(struct qeth_qdio_out_q *queue,
1105  				  struct qeth_hdr *hdr, struct sk_buff *skb,
1106  				  __be16 proto, unsigned int data_len));
1107  
1108  #endif /* __QETH_CORE_H__ */
1109