/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file is part of the Chelsio T1 Ethernet driver.
 *
 * Copyright (C) 2003-2005 Chelsio Communications.  All rights reserved.
 */

#ifndef _CHELSIO_SGE_H
#define	_CHELSIO_SGE_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/types.h>

#include "osdep.h"

#define	MBLK_MAX 8
#define	spin_lock mutex_enter
#define	spin_unlock mutex_exit
#define	atomic_sub(a, b) atomic_add_32(b, -(a))
#define	atomic_add(a, b) atomic_add_32(b, (a))
#define	atomic_read(a) (a)
#define	atomic_set(a, b) (*(a) = b)
#define	spinlock_t kmutex_t
#define	dma_addr_t uint64_t
#define	wmb() membar_producer()
#define	doorbell_pio(sge, cmd) sge_ring_doorbell(sge, cmd)
#define	skb_reserve(skb, offset) (skb->b_rptr += offset)
#define	__skb_pull(skb, len) (skb->b_rptr += len)
#define	skb_put(skb, len) ((skb)->b_wptr  = (skb)->b_rptr + (len))
#define	skb_pull(skb, len) (skb->b_rptr += len)
#define	unlikely(a) (a)
#define	likely(a) (a)
#define	SKB_DATA_ALIGN(X) (((X) + (sizeof (long)-1)) & ~(sizeof (long)-1))
#define	t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B)
#define	t1_is_T1C(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1C)

#define	SGE_SM_BUF_SZ(sa)	(sa->ch_sm_buf_sz)
#define	SGE_BG_BUF_SZ(sa)	(sa->ch_bg_buf_sz)

#define	SGE_CMDQ_N		2
#define	SGE_FREELQ_N		2
#ifdef CONFIG_CHELSIO_T1_OFFLOAD
#define	SGE_CMDQ0_E_N		4096
#define	SGE_CMDQ1_E_N		128
#define	SGE_FREELQ0_E_N		2048
#define	SGE_FREELQ1_E_N		1024
#define	SGE_RESPQ_E_N		7168    /* |CMDQ0| + |FREELQ0| + |FREELQ1| */
#else
#define	SGE_CMDQ0_E_N		2048
#define	SGE_CMDQ1_E_N		128
#define	SGE_FREELQ0_E_N		4096
#define	SGE_FREELQ1_E_N		1024
#define	SGE_RESPQ_E_N		7168    /* |CMDQ0| + |FREELQ0| + |FREELQ1| */
#endif  /* CONFIG_CHELSIO_T1_OFFLOAD */
#define	SGE_BATCH_THRESH	16
#define	SGE_INTR_BUCKETSIZE	100
#define	SGE_INTR_MAXBUCKETS	11
#define	SGE_INTRTIMER0		1
#define	SGE_INTRTIMER1		30
#define	SGE_INTRTIMER_NRES	10000
#define	SGE_RX_COPY_THRESHOLD	256
#define	SGE_RX_OFFSET		2
#ifdef CONFIG_CHELSIO_T1_OFFLOAD
#define	SGE_RX_SM_BUF_SIZE(sa)	1536
#else
#define	SGE_RX_SM_BUF_SIZE(sa)	(sa->ch_sm_buf_sz)
#endif

/*
 * CPL5 Defines
 */
#define	FLITSTOBYTES    8

#define	CPL_FORMAT_0_SIZE 8
#define	CPL_FORMAT_1_SIZE 16
#define	CPL_FORMAT_2_SIZE 24
#define	CPL_FORMAT_3_SIZE 32
#define	CPL_FORMAT_4_SIZE 40
#define	CPL_FORMAT_5_SIZE 48

#define	TID_MASK 0xffffff

#define	SZ_CPL_RX_PKT CPL_FORMAT_0_SIZE

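/*
 * Hardware command queue (cmdQ) descriptor.  Each entry carries the 64-bit
 * bus address and length of one transmit buffer, with Sop/Eop marking the
 * first and last descriptor of a multi-descriptor packet.  The generation
 * bit flips each time software wraps the ring, which is how the hardware
 * distinguishes freshly written entries from stale ones; it appears twice
 * (GenerationBit and GenerationBit2), apparently so each half of the
 * descriptor can be validated on its own.  The layout is declared
 * separately for big- and little-endian hosts so the bit-fields land where
 * the SGE expects them.
 */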
#if BYTE_ORDER == BIG_ENDIAN

typedef struct {
	u32 AddrLow;
	u32 GenerationBit: 1;
	u32 BufferLength: 31;
	u32 RespQueueSelector: 4;
	u32 ResponseTokens: 12;
	u32 CmdId: 8;
	u32 Reserved: 3;
	u32 TokenValid: 1;
	u32 Eop: 1;
	u32 Sop: 1;
	u32 DataValid: 1;
	u32 GenerationBit2: 1;
	u32 AddrHigh;
} CmdQueueEntry;


#elif BYTE_ORDER == LITTLE_ENDIAN


typedef struct {
	u32 BufferLength: 31;
	u32 GenerationBit: 1;
	u32 AddrLow;
	u32 AddrHigh;
	u32 GenerationBit2: 1;
	u32 DataValid: 1;
	u32 Sop: 1;
	u32 Eop: 1;
	u32 TokenValid: 1;
	u32 Reserved: 3;
	u32 CmdId: 8;
	u32 ResponseTokens: 12;
	u32 RespQueueSelector: 4;
} CmdQueueEntry;

#endif


typedef CmdQueueEntry cmdQ_e;

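/*
 * SGE response queue (respQ) entry.  The hardware posts one of these for
 * each receive event or credit return: FreelistQid identifies which
 * free-list supplied the buffer, Sop/Eop delimit the packet,
 * Cmdq[01]CreditReturn and Cmdq[01]DmaComplete report transmit progress,
 * and Qsleeping flags queues the SGE has stopped fetching from.  As with
 * the other rings, the generation bit marks entries written on the current
 * pass around the ring.
 */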
#if BYTE_ORDER == BIG_ENDIAN

typedef struct {
	u32 Qsleeping: 4;
	u32 Cmdq1CreditReturn: 5;
	u32 Cmdq1DmaComplete: 5;
	u32 Cmdq0CreditReturn: 5;
	u32 Cmdq0DmaComplete: 5;
	u32 FreelistQid: 2;
	u32 CreditValid: 1;
	u32 DataValid: 1;
	u32 Offload: 1;
	u32 Eop: 1;
	u32 Sop: 1;
	u32 GenerationBit: 1;
	u32 BufferLength;
} ResponseQueueEntry;


#elif BYTE_ORDER == LITTLE_ENDIAN


typedef struct {
	u32 BufferLength;
	u32 GenerationBit: 1;
	u32 Sop: 1;
	u32 Eop: 1;
	u32 Offload: 1;
	u32 DataValid: 1;
	u32 CreditValid: 1;
	u32 FreelistQid: 2;
	u32 Cmdq0DmaComplete: 5;
	u32 Cmdq0CreditReturn: 5;
	u32 Cmdq1DmaComplete: 5;
	u32 Cmdq1CreditReturn: 5;
	u32 Qsleeping: 4;
} ResponseQueueEntry;

#endif

typedef ResponseQueueEntry respQ_e;

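/*
 * Free-list queue (freelQ) entry: a receive buffer descriptor giving the
 * 64-bit bus address and length of a buffer the hardware may fill.  Two
 * free-lists are maintained (see SGE_FREELQ_N), one for small buffers and
 * one for big/jumbo buffers (see SGE_SM_BUF_SZ/SGE_BG_BUF_SZ above).
 */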
#if BYTE_ORDER == BIG_ENDIAN


typedef struct {
	u32 AddrLow;
	u32 GenerationBit: 1;
	u32 BufferLength: 31;
	u32 Reserved: 31;
	u32 GenerationBit2: 1;
	u32 AddrHigh;
} FLQueueEntry;


#elif BYTE_ORDER == LITTLE_ENDIAN


typedef struct {
	u32 BufferLength: 31;
	u32 GenerationBit: 1;
	u32 AddrLow;
	u32 AddrHigh;
	u32 GenerationBit2: 1;
	u32 Reserved: 31;
} FLQueueEntry;


#endif

typedef FLQueueEntry freelQ_e;

/*
 * Command QUEUE meta entry format.
 */
typedef struct cmdQ_ce {
	void *ce_mp;		/* head mblk of pkt */
	free_dh_t *ce_dh;	/* ddi dma handle */
	uint_t ce_flg;		/* flag 0 - NIC descriptor; 1 - TOE */
	uint_t ce_len;		/* length of mblk component */
	uint64_t ce_pa;		/* physical address */
} cmdQ_ce_t;

/*
 * command queue control structure
 */
typedef struct cmdQ {
	u32 cq_credits;		/* # available descriptors for Xmit */
	u32 cq_asleep;		/* HW DMA Fetch status */
	u32 cq_pio_pidx;	/* Variable updated on Doorbell */
	u32 cq_entries_n;	/* # entries for Xmit */
	u32 cq_pidx;		/* producer index (SW) */
	u32 cq_complete;	/* Shadow consumer index (HW) */
	u32 cq_cidx;		/* consumer index (HW) */
	u32 cq_genbit;		/* current generation (=valid) bit */
	cmdQ_e *cq_entries;
	cmdQ_ce_t *cq_centries;
	spinlock_t cq_qlock;
	uint64_t cq_pa;		/* may not be needed */
	ulong_t cq_dh;
	ulong_t cq_ah;		/* may not be needed */
} cmdQ_t;

/*
 * free list queue control structure
 */
typedef struct freelQ {
	u32 fq_id;		/* 0 queue 0, 1 queue 1 */
	u32 fq_credits;		/* # available RX buffer descriptors */
	u32 fq_entries_n;	/* # RX buffer descriptors */
	u32 fq_pidx;		/* producer index (SW) */
	u32 fq_cidx;		/* consumer index (HW) */
	u32 fq_genbit;		/* current generation (=valid) bit */
	u32 fq_rx_buffer_size;	/* size of buffers on this freelist */
	freelQ_e *fq_entries;	/* HW freelist descriptor Q */
	struct freelQ_ce *fq_centries;	/* SW freelist context descriptor Q */
	uint64_t fq_pa;		/* may not be needed */
	ulong_t fq_dh;
	ulong_t fq_ah;
	u32 fq_pause_on_thresh;
	u32 fq_pause_off_thresh;
} freelQ_t;

/*
 * response queue control structure
 */
typedef struct respQ {
	u32 rq_credits;		/* # avail response Q entries */
	u32 rq_credits_pend;	/* # not yet returned entries */
	u32 rq_credits_thresh;	/* return threshold */
	u32 rq_entries_n;	/* # response Q descriptors */
	u32 rq_pidx;		/* producer index (HW) */
	u32 rq_cidx;		/* consumer index (SW) */
	u32 rq_genbit;		/* current generation (=valid) bit */
	respQ_e *rq_entries;	/* HW response Q */
	uint64_t rq_pa;		/* may not be needed */
	ulong_t rq_dh;
	ulong_t rq_ah;
} reapQ_t;

struct sge_intr_counts {
	uint32_t respQ_empty;		/* # times respQ empty */
	uint32_t respQ_overflow;	/* # respQ overflow (fatal) */
	uint32_t freelistQ_empty;	/* # times freelist empty */
	uint32_t pkt_too_big;		/* packet too large (fatal) */
	uint32_t pkt_mismatch;
	uint32_t cmdQ_full[2];		/* not HW intr, host cmdQ[] full */
	uint32_t tx_reclaims[2];
	uint32_t tx_msg_pullups;	/* # of tx pkt coalescing events */
	uint32_t tx_hdr_pullups;	/* # of tx hdr coalescing events */
	uint32_t tx_tcp_ip_frag;	/* # of ip fragments for tcp data */
	uint32_t tx_udp_ip_frag;	/* # of ip fragments for udp data */
	uint32_t tx_soft_cksums;	/* # of software checksums done */
	uint32_t tx_need_cpl_space;	/* # of allocs for cpl header */
	uint32_t tx_multi_mblks;	/* # of multi-mblk packets */
	uint32_t tx_no_dvma1;		/* # of dvma mapping failures */
	uint32_t tx_no_dvma2;		/* # of dvma mapping failures */
	uint32_t tx_no_dma1;		/* # of dma mapping failures */
	uint32_t tx_no_dma2;		/* # of dma mapping failures */
	uint32_t rx_cmdq0;		/* # of Qsleeping CMDQ0's */
	uint32_t rx_cmdq1;		/* # of Qsleeping CMDQ1's */
	uint32_t rx_flq0;		/* # of Qsleeping FL0's */
	uint32_t rx_flq1;		/* # of Qsleeping FL1's */
	uint32_t rx_flq0_sz;		/* size of freelist-0 buffers */
	uint32_t rx_flq1_sz;		/* size of freelist-1 buffers */
	uint32_t rx_pkt_drops;		/* # intentionally dropped packets */
	uint32_t rx_pkt_copied;		/* # times packets copied by sge */
	uint32_t rx_pause_on;		/* # of system pause-ons required */
	uint32_t rx_pause_off;		/* # of system pause-offs required */
	uint32_t rx_pause_ms;		/* microseconds while paused */
	uint32_t rx_pause_spike;	/* maximum time paused */
	uint32_t rx_fl_credits;		/* current free list credit usage */
	uint32_t rx_flbuf_fails;	/* # of freelist buf alloc fails */
	uint32_t rx_flbuf_allocs;	/* # of freelist buf allocs */
	uint32_t rx_badEopSop;		/* # of times bad Eop/Sop received */
	uint32_t rx_flq0_cnt;		/* # of times free list Q 0 entry used */
	uint32_t rx_flq1_cnt;		/* # of times free list Q 1 entry used */
	uint32_t arp_sent;		/* # times arp packet sent */
#ifdef SUN_KSTATS
	uint32_t tx_doorbells;
	uint32_t intr_doorbells;
	uint32_t intr1_doorbells;
	uint32_t sleep_cnt;
	uint32_t pe_allocb_cnt;
	uint32_t tx_descs[MBLK_MAX];
#endif
};

#ifdef SUN_KSTATS
typedef struct sge_intr_counts *p_ch_stats_t;

/*
 * Driver maintained kernel statistics.
 */
typedef struct _ch_kstat_t {
	/*
	 * Link Input/Output stats
	 */
	kstat_named_t respQ_empty;	/* # times respQ empty */
	kstat_named_t respQ_overflow;	/* # respQ overflow (fatal) */
	kstat_named_t freelistQ_empty;	/* # times freelist empty */
	kstat_named_t pkt_too_big;	/* packet too large (fatal) */
	kstat_named_t pkt_mismatch;
	kstat_named_t cmdQ_full[2];	/* not HW intr, host cmdQ[] full */
	kstat_named_t tx_reclaims[2];	/* # of tx reclaims called */
	kstat_named_t tx_msg_pullups;	/* # of tx pkt coalescing events */
	kstat_named_t tx_hdr_pullups;	/* # of tx hdr coalescing events */
	kstat_named_t tx_tcp_ip_frag;	/* # of ip fragments for tcp data */
	kstat_named_t tx_udp_ip_frag;	/* # of ip fragments for udp data */
	kstat_named_t tx_soft_cksums;	/* # of software checksums done */
	kstat_named_t tx_need_cpl_space;	/* # of allocs for cpl header */
	kstat_named_t tx_multi_mblks;	/* # of multi fragment packets */
	kstat_named_t tx_no_dvma1;	/* # of dvma mapping failures */
	kstat_named_t tx_no_dvma2;	/* # of dvma mapping failures */
	kstat_named_t tx_no_dma1;	/* # of dma mapping failures */
	kstat_named_t tx_no_dma2;	/* # of dma mapping failures */
	kstat_named_t rx_cmdq0;		/* # times Qsleeping cmdq0 */
	kstat_named_t rx_cmdq1;		/* # times Qsleeping cmdq1 */
	kstat_named_t rx_flq0;		/* # times Qsleeping flq0 */
	kstat_named_t rx_flq0_sz;	/* size of freelist-0 buffers */
	kstat_named_t rx_flq1;		/* # times Qsleeping flq1 */
	kstat_named_t rx_flq1_sz;	/* size of freelist-1 buffers */
	kstat_named_t rx_pkt_drops;	/* # times packets dropped by sge */
	kstat_named_t rx_pkt_copied;	/* # intentionally copied packets */
	kstat_named_t rx_pause_on;	/* # of system pause-ons required */
	kstat_named_t rx_pause_off;	/* # of system pause-offs required */
	kstat_named_t rx_pause_ms;	/* microseconds while paused */
	kstat_named_t rx_pause_spike;	/* maximum time paused */
	kstat_named_t rx_fl_credits;	/* current free list credit usage */
	kstat_named_t rx_flbuf_fails;	/* # of freelist buf alloc fails */
	kstat_named_t rx_flbuf_allocs;	/* # of freelist buf allocs */
	kstat_named_t rx_badEopSop;	/* # of times bad Eop/Sop received */
	kstat_named_t rx_flq0_cnt;	/* # of times free list Q 0 entry used */
	kstat_named_t rx_flq1_cnt;	/* # of times free list Q 1 entry used */
	kstat_named_t arp_sent;		/* # times arp packet sent */

	kstat_named_t tx_doorbells;
	kstat_named_t intr_doorbells;
	kstat_named_t intr1_doorbells;
	kstat_named_t sleep_cnt;
	kstat_named_t pe_allocb_cnt;
	kstat_named_t tx_descs[MBLK_MAX];
} ch_kstat_t;
typedef ch_kstat_t *p_ch_kstat_t;
#endif

typedef struct _pesge {
	peobj *obj;			/* adapter backpointer */
	struct freelQ freelQ[2];	/* freelist Q(s) */
	struct respQ respQ;		/* response Q instantiation */
	uint32_t rx_pkt_pad;		/* RX padding for T2 packets (hw) */
	uint32_t rx_offset;		/* RX padding for T1 packets (sw) */
	uint32_t jumbo_fl;		/* jumbo freelist Q index */
	uint32_t intrtimer[SGE_INTR_MAXBUCKETS];	/* timer values */
	uint32_t currIndex;		/* current index into intrtimer[] */
	uint32_t intrtimer_nres;	/* no resource interrupt timer value */
	uint32_t sge_control;		/* shadow content of sge control reg */
	struct sge_intr_counts intr_cnt;
#ifdef SUN_KSTATS
	p_kstat_t ksp;
#endif
	ch_cyclic_t espi_wa_cyclic;
	uint32_t ptimeout;
	void *pskb;
	struct cmdQ cmdQ[2];		/* command Q(s) */
	int do_udp_csum;
	int do_tcp_csum;
} _pesge;

/*
 * ce_flg flag values
 */
#define	DH_DMA  1
#define	DH_DVMA 2
#define	DH_TOE  3
#define	DH_ARP  8

typedef struct freelQ_ce {
	void *fe_mp;		/* head mblk of pkt */
	ulong_t fe_dh;		/* ddi dma handle */
	uint_t  fe_len;		/* length of mblk component */
	uint64_t fe_pa;		/* physical address */
} freelQ_ce_t;

pesge *t1_sge_create(ch_t *, struct sge_params *);

extern int  t1_sge_destroy(pesge* sge);
extern int  sge_data_out(pesge*, int,  mblk_t *, cmdQ_ce_t *, int, uint32_t);
extern int  sge_data_in(pesge *);
extern int  sge_start(pesge*);
extern int  sge_stop(pesge *);
extern int  t1_sge_configure(pesge *sge, struct sge_params *p);

extern int  t1_sge_intr_error_handler(pesge*);
extern int  t1_sge_intr_enable(pesge*);
extern int  t1_sge_intr_disable(pesge*);
extern int  t1_sge_intr_clear(pesge*);
extern u32  t1_sge_get_ptimeout(ch_t *);
extern void t1_sge_set_ptimeout(ch_t *, u32);

extern struct sge_intr_counts *sge_get_stat(pesge *);
extern void sge_add_fake_arp(pesge *, void *);
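
/*
 * Rough lifecycle sketch for the entry points above (illustrative only;
 * the variable names used here are hypothetical, not part of this
 * interface):
 *
 *	pesge *sge = t1_sge_create(chp, params);	allocate the queues
 *	(void) t1_sge_configure(sge, params);		program the SGE
 *	(void) sge_start(sge);				enable the engine
 *		...
 *	(void) sge_data_out(sge, qid, mp, cmp, count, flg);	transmit
 *	(void) sge_data_in(sge);			service the respQ
 *		...
 *	(void) sge_stop(sge);
 *	(void) t1_sge_destroy(sge);
 */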

/*
 * Default SGE settings
 */
#define	SGE_CMDQ0_CNT	(512)
#define	SGE_FLQ0_CNT	(512)
#define	SGE_RESPQ_CNT	(1024)

/*
 * The structures below were taken from cpl5_cmd.h.  That header pulls in a
 * number of #includes that cause build problems, so for now we keep a
 * private copy here.  When the sge code is made common, this duplication
 * will need to be resolved.
 */

typedef uint8_t  __u8;
typedef uint32_t __u32;
typedef uint16_t __u16;

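/*
 * CPL messages begin with a 32-bit word carrying an 8-bit opcode and a
 * 24-bit tid (see TID_MASK above); the union below overlays the opcode
 * byte on that word.
 */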
union opcode_tid {
    __u32 opcode_tid;
    __u8 opcode;
};

/*
 * We want this header's alignment to be no more stringent than 2-byte aligned.
 * All fields are u8 or u16 except for the length.  However, that field is not
 * used, so we break it into two 16-bit parts to easily meet our alignment
 * needs.
 */
struct cpl_tx_pkt {
    __u8 opcode;
#if BYTE_ORDER == BIG_ENDIAN
    __u8 rsvd:1;
    __u8 vlan_valid:1;
    __u8 l4_csum_dis:1;
    __u8 ip_csum_dis:1;
    __u8 iff:4;
#else
    __u8 iff:4;
    __u8 ip_csum_dis:1;
    __u8 l4_csum_dis:1;
    __u8 vlan_valid:1;
    __u8 rsvd:1;
#endif
    __u16 vlan;
    __u16 len_hi;
    __u16 len_lo;
};

#define	CPL_TX_PKT 0xb2
#define	SZ_CPL_TX_PKT CPL_FORMAT_0_SIZE

struct cpl_rx_data {
    union opcode_tid ot;
    __u32 len;
    __u32 seq;
    __u16 urg;
    __u8  rsvd;
    __u8  status;
};

struct cpl_rx_pkt {
    __u8 opcode;
#if BYTE_ORDER == LITTLE_ENDIAN
    __u8 iff:4;
    __u8 csum_valid:1;
    __u8 bad_pkt:1;
    __u8 vlan_valid:1;
    __u8 rsvd:1;
#else
    __u8 rsvd:1;
    __u8 vlan_valid:1;
    __u8 bad_pkt:1;
    __u8 csum_valid:1;
    __u8 iff:4;
#endif
    __u16 csum;
    __u16 vlan;
    __u16 len;
};

#ifdef __cplusplus
}
#endif

#endif /* _CHELSIO_SGE_H */