--- cxgb_osdep.h (35f70ae1d3b9ebcc2d2d3dcf8be4032f10970156)
+++ cxgb_osdep.h (8090c9f504c0c19831713ab2392d0993a5fc5b36)
 /**************************************************************************
 
 Copyright (c) 2007, Chelsio Inc.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
 
--- 22 unchanged lines hidden ---
 ***************************************************************************/
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/ctype.h>
 #include <sys/endian.h>
 #include <sys/bus.h>
 
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
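
(The two includes added above are pulled in for the struct mtx embedded in the buf_ring introduced later in this revision, and for the mtx_lock()/mtx_unlock() calls in its inline enqueue path.)
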
 #include <dev/mii/mii.h>
 
 #ifdef CONFIG_DEFINED
 #include <common/cxgb_version.h>
 #include <cxgb_config.h>
 #else
 #include <dev/cxgb/common/cxgb_version.h>
 #include <dev/cxgb/cxgb_config.h>
 #endif
 
 #ifndef _CXGB_OSDEP_H_
 #define _CXGB_OSDEP_H_
 
 typedef struct adapter adapter_t;
 struct sge_rspq;
 
+
 struct t3_mbuf_hdr {
 	struct mbuf *mh_head;
 	struct mbuf *mh_tail;
 };
 
-
 #define PANIC_IF(exp) do {			\
 	if (exp)				\
 		panic("BUG: %s", #exp);		\
 } while (0)
 
-
 #define m_get_priority(m) ((uintptr_t)(m)->m_pkthdr.rcvif)
 #define m_set_priority(m, pri) ((m)->m_pkthdr.rcvif = (struct ifnet *)((uintptr_t)pri))
 #define m_set_sgl(m, sgl) ((m)->m_pkthdr.header = (sgl))
 #define m_get_sgl(m) ((bus_dma_segment_t *)(m)->m_pkthdr.header)
 #define m_set_sgllen(m, len) ((m)->m_pkthdr.ether_vtag = len)
 #define m_get_sgllen(m) ((m)->m_pkthdr.ether_vtag)
 
 /*
--- 33 unchanged lines hidden ---
  * Workaround for weird Chelsio issue
  */
 #if __FreeBSD_version > 700029
 #define PRIV_SUPPORTED
 #endif
 
 #define CXGB_TX_CLEANUP_THRESHOLD 32
 
+
 #ifdef DEBUG_PRINT
 #define DPRINTF printf
 #else
 #define DPRINTF(...)
 #endif
 
 #define TX_MAX_SIZE (1 << 16)	/* 64KB */
 #define TX_MAX_SEGS 36		/* maximum supported by card */
+
 #define TX_MAX_DESC 4		/* max descriptors per packet */
 
+
 #define TX_START_MIN_DESC (TX_MAX_DESC << 2)
 
-#if 0
-#define TX_START_MAX_DESC (TX_ETH_Q_SIZE >> 2)	/* maximum number of descriptors */
-#endif
 
+
 #define TX_START_MAX_DESC (TX_MAX_DESC << 3)	/* maximum number of descriptors
 						 * call to start used per */
 
 #define TX_CLEAN_MAX_DESC (TX_MAX_DESC << 4)	/* maximum tx descriptors
 						 * to clean per iteration */
+#define TX_WR_SIZE_MAX 11*1024	/* the maximum total size of packets aggregated into a single
+				 * TX WR
+				 */
+#define TX_WR_COUNT_MAX 7	/* the maximum total number of packets that can be
+				 * aggregated into a single TX WR
+				 */
 
 
 #if defined(__i386__) || defined(__amd64__)
 #define mb()	__asm volatile("mfence":::"memory")
 #define rmb()	__asm volatile("lfence":::"memory")
 #define wmb()	__asm volatile("sfence" ::: "memory")
 #define smp_mb() mb()
 
-#define L1_CACHE_BYTES 64
+#define L1_CACHE_BYTES 128
 static __inline
 void prefetch(void *x)
 {
 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
 }
 
 extern void kdb_backtrace(void);
 
--- 8 unchanged lines hidden ---
 #else /* !i386 && !amd64 */
 #define mb()
 #define rmb()
 #define wmb()
 #define smp_mb()
 #define prefetch(x)
 #define L1_CACHE_BYTES 32
 #endif
+
+struct buf_ring {
+	caddr_t *br_ring;
+	volatile uint32_t br_cons;
+	volatile uint32_t br_prod;
+	int br_size;
+	struct mtx br_lock;
+};
+
+struct buf_ring *buf_ring_alloc(int count, int flags);
+void buf_ring_free(struct buf_ring *);
+
+static __inline int
+buf_ring_count(struct buf_ring *mr)
+{
+	int size = mr->br_size;
+	int mask = size - 1;
+
+	return ((size + mr->br_prod - mr->br_cons) & mask);
+}
+
+static __inline int
+buf_ring_empty(struct buf_ring *mr)
+{
+	return (mr->br_cons == mr->br_prod);
+}
+
+/*
+ * The producer and consumer are independently locked
+ * this relies on the consumer providing his own serialization
+ *
+ */
+static __inline void *
+buf_ring_dequeue(struct buf_ring *mr)
+{
+	int prod, cons, mask;
+	caddr_t *ring, m;
+
+	ring = (caddr_t *)mr->br_ring;
+	mask = mr->br_size - 1;
+	cons = mr->br_cons;
+	prod = mr->br_prod;
+	m = NULL;
+	if (cons != prod) {
+		m = ring[cons];
+		mr->br_cons = (cons + 1) & mask;
+		mb();
+	}
+	return (m);
+}
+
+
+static __inline int
+__buf_ring_enqueue(struct buf_ring *mr, void *m)
+{
+
+	int prod, cons, mask, err;
+
+	cons = mr->br_cons;
+	prod = mr->br_prod;
+	mask = mr->br_size - 1;
+	if (((prod + 1) & mask) != cons) {
+		mr->br_ring[prod] = m;
+		mb();
+		mr->br_prod = (prod + 1) & mask;
+		err = 0;
+	} else
+		err = ENOBUFS;
+
+	return (err);
+}
+
+static __inline int
+buf_ring_enqueue(struct buf_ring *mr, void *m)
+{
+	int err;
+
+	mtx_lock(&mr->br_lock);
+	err = __buf_ring_enqueue(mr, m);
+	mtx_unlock(&mr->br_lock);
+
+	return (err);
+}
+
+static __inline void *
+buf_ring_peek(struct buf_ring *mr)
+{
+	int prod, cons, mask;
+	caddr_t *ring, m;
+
+	ring = (caddr_t *)mr->br_ring;
+	mask = mr->br_size - 1;
+	cons = mr->br_cons;
+	prod = mr->br_prod;
+	m = NULL;
+	if (cons != prod)
+		m = ring[cons];
+
+	return (m);
+}
+
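
The buf_ring added above is a small producer/consumer ring: buf_ring_enqueue() is serialized by br_lock, while buf_ring_dequeue() and buf_ring_peek() rely on the caller supplying its own consumer-side serialization, and the (br_size - 1) masking implies a power-of-two ring size. A minimal usage sketch follows, assuming a hypothetical driver TX queue; the txq_* names, TXQ_RING_SIZE, and the use of M_NOWAIT as the allocation flag are illustrative assumptions, not taken from this header.

/*
 * Illustrative sketch only: a driver-style TX queue built on the
 * buf_ring API declared above.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

#include <dev/cxgb/cxgb_osdep.h>	/* this header */

#define TXQ_RING_SIZE	256	/* power of two, as the index masking requires */

static struct buf_ring *txq_ring;

static int
txq_init(void)
{
	/* buf_ring_alloc() is assumed to take a malloc(9) flag here. */
	txq_ring = buf_ring_alloc(TXQ_RING_SIZE, M_NOWAIT);
	return (txq_ring == NULL ? ENOMEM : 0);
}

/* Producer side: may be called from several contexts, serialized by br_lock. */
static int
txq_enqueue(struct mbuf *m)
{
	return (buf_ring_enqueue(txq_ring, m));	/* ENOBUFS when the ring is full */
}

/* Consumer side: the caller must provide its own serialization. */
static void
txq_drain(void)
{
	struct mbuf *m;

	while ((m = buf_ring_dequeue(txq_ring)) != NULL)
		m_freem(m);	/* stand-in for the real transmit/cleanup work */
}
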
 #define DBG_RX (1 << 0)
 static const int debug_flags = DBG_RX;
 
 #ifdef DEBUG_PRINT
 #define DBG(flag, msg) do {	\
 	if ((flag & debug_flags))	\
 		printf msg;	\
 } while (0)
--- 6 unchanged lines hidden ---
 
 #define CH_ERR(adap, fmt, ...)device_printf(adap->dev, fmt, ##__VA_ARGS__);
 
 #define CH_WARN(adap, fmt, ...)	device_printf(adap->dev, fmt, ##__VA_ARGS__)
 #define CH_ALERT(adap, fmt, ...) device_printf(adap->dev, fmt, ##__VA_ARGS__)
 
 #define t3_os_sleep(x) DELAY((x) * 1000)
 
-#define test_and_clear_bit(bit, p) atomic_cmpset_int((p), ((*(p)) | bit), ((*(p)) & ~bit))
+#define test_and_clear_bit(bit, p) atomic_cmpset_int((p), ((*(p)) | (1<<bit)), ((*(p)) & ~(1<<bit)))
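
The test_and_clear_bit() change above makes the macro treat its first argument as a bit index rather than as a ready-made mask, matching the bit-number convention of the Linux-style helpers this header emulates. For illustration only, with bit == 3 the two versions expand roughly as:

	/* old: the index 3 itself was used as the mask */
	atomic_cmpset_int(p, (*p) | 3, (*p) & ~3);
	/* new: bit 3 is first turned into the mask (1 << 3) */
	atomic_cmpset_int(p, (*p) | (1 << 3), (*p) & ~(1 << 3));
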
 
-
 #define max_t(type, a, b) (type)max((a), (b))
 #define net_device ifnet
 #define cpu_to_be32 htobe32
 
-
-
 /* Standard PHY definitions */
 #define BMCR_LOOPBACK	BMCR_LOOP
 #define BMCR_ISOLATE	BMCR_ISO
 #define BMCR_ANENABLE	BMCR_AUTOEN
 #define BMCR_SPEED1000	BMCR_SPEED1
 #define BMCR_SPEED100	BMCR_SPEED0
 #define BMCR_ANRESTART	BMCR_STARTNEG
 #define BMCR_FULLDPLX	BMCR_FDX
--- 33 unchanged lines hidden ---
 #define __devinit
 #define udelay(x) DELAY(x)
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 #define le32_to_cpu(x) le32toh(x)
 #define cpu_to_le32(x) htole32(x)
 #define swab32(x) bswap32(x)
 #define simple_strtoul strtoul
 
-/* More types and endian definitions */
+
 typedef uint8_t u8;
 typedef uint16_t u16;
 typedef uint32_t u32;
 typedef uint64_t u64;
 
 typedef uint8_t __u8;
 typedef uint16_t __u16;
 typedef uint32_t __u32;
 typedef uint8_t __be8;
 typedef uint16_t __be16;
 typedef uint32_t __be32;
 typedef uint64_t __be64;
 
+
 #if BYTE_ORDER == BIG_ENDIAN
 #define __BIG_ENDIAN_BITFIELD
 #elif BYTE_ORDER == LITTLE_ENDIAN
 #define __LITTLE_ENDIAN_BITFIELD
 #else
 #error "Must set BYTE_ORDER"
 #endif
 
--- 48 unchanged lines hidden ---