/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

 */

/*
	Module: rt2x00
	Abstract: rt2x00 queue data structures and routines
 */

#ifndef RT2X00QUEUE_H
#define RT2X00QUEUE_H

#include <linux/prefetch.h>

/**
 * DOC: Entry frame size
 *
 * Ralink PCI devices require the frame size to be a multiple of 128 bytes.
 * For USB devices this restriction does not apply, but the value of
 * 2432 still makes sense since it is big enough to contain the maximum
 * fragment size according to the IEEE 802.11 spec.
 * The aggregation size depends on support from the driver, but should
 * be something around 3840 bytes.
 */
#define DATA_FRAME_SIZE		2432
#define MGMT_FRAME_SIZE		256
#define AGGREGATION_SIZE	3840
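
/*
 * Note: the values above are consistent with the 128 byte multiple
 * requirement described in the DOC block: 2432 = 19 * 128 and
 * 3840 = 30 * 128. An illustrative compile-time check (a sketch only,
 * not present in the driver) could look like:
 *
 *	BUILD_BUG_ON(DATA_FRAME_SIZE % 128);
 *	BUILD_BUG_ON(AGGREGATION_SIZE % 128);
 */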

/**
 * enum data_queue_qid: Queue identification
 *
 * @QID_AC_VO: AC VO queue
 * @QID_AC_VI: AC VI queue
 * @QID_AC_BE: AC BE queue
 * @QID_AC_BK: AC BK queue
 * @QID_HCCA: HCCA queue
 * @QID_MGMT: MGMT queue (prio queue)
 * @QID_RX: RX queue
 * @QID_OTHER: None of the above (don't use, only present for completeness)
 * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
 * @QID_ATIM: Atim queue (value unspecified, don't send it to device)
 */
enum data_queue_qid {
	QID_AC_VO = 0,
	QID_AC_VI = 1,
	QID_AC_BE = 2,
	QID_AC_BK = 3,
	QID_HCCA = 4,
	QID_MGMT = 13,
	QID_RX = 14,
	QID_OTHER = 15,
	QID_BEACON,
	QID_ATIM,
};

/**
 * enum skb_frame_desc_flags: Flags for &struct skb_frame_desc
 *
 * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX
 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
 * @SKBDESC_IV_STRIPPED: Frame contained an IV/EIV provided by
 *	mac80211 but was stripped for processing by the driver.
 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
 *	don't try to pass it back.
 * @SKBDESC_DESC_IN_SKB: The descriptor is at the start of the
 *	skb, instead of in the desc field.
 */
enum skb_frame_desc_flags {
	SKBDESC_DMA_MAPPED_RX = 1 << 0,
	SKBDESC_DMA_MAPPED_TX = 1 << 1,
	SKBDESC_IV_STRIPPED = 1 << 2,
	SKBDESC_NOT_MAC80211 = 1 << 3,
	SKBDESC_DESC_IN_SKB = 1 << 4,
};

/**
 * struct skb_frame_desc: Descriptor information for the skb buffer
 *
 * This structure is placed over the driver_data array, which means that
 * it must not exceed the size of that array (40 bytes).
 *
 * @flags: Frame flags, see &enum skb_frame_desc_flags.
 * @desc_len: Length of the frame descriptor.
 * @tx_rate_idx: the index of the TX rate, used for TX status reporting
 * @tx_rate_flags: the TX rate flags, used for TX status reporting
 * @desc: Pointer to descriptor part of the frame.
 *	Note that this pointer could point to something outside
 *	of the scope of the skb->data pointer.
 * @iv: IV/EIV data used during encryption/decryption.
 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
 * @sta: The station to which this sk buffer was sent.
 */
struct skb_frame_desc {
	u8 flags;

	u8 desc_len;
	u8 tx_rate_idx;
	u8 tx_rate_flags;

	void *desc;

	__le32 iv[2];

	dma_addr_t skb_dma;
	struct ieee80211_sta *sta;
};

/**
 * get_skb_frame_desc - Obtain the rt2x00 frame descriptor from a sk_buff.
 * @skb: &struct sk_buff from where we obtain the &struct skb_frame_desc
 */
static inline struct skb_frame_desc *get_skb_frame_desc(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct skb_frame_desc) >
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
	return (struct skb_frame_desc *)&IEEE80211_SKB_CB(skb)->driver_data;
}
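
/*
 * Illustrative usage sketch (not part of the driver API): an RX path
 * could initialise the frame descriptor of a freshly received skb like
 * this. The names "rxd" and "rxd_len" are assumptions made up for this
 * example only.
 *
 *	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
 *
 *	skbdesc->flags = 0;
 *	skbdesc->desc = rxd;
 *	skbdesc->desc_len = rxd_len;
 */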

/**
 * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc
 *
 * @RXDONE_SIGNAL_PLCP: Signal field contains the PLCP value.
 * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value.
 * @RXDONE_SIGNAL_MCS: Signal field contains the MCS value.
 * @RXDONE_MY_BSS: Frame originates from the device's BSS.
 * @RXDONE_CRYPTO_IV: Driver provided IV/EIV data.
 * @RXDONE_CRYPTO_ICV: Driver provided ICV data.
 * @RXDONE_L2PAD: 802.11 payload has been padded to 4-byte boundary.
 */
enum rxdone_entry_desc_flags {
	RXDONE_SIGNAL_PLCP = BIT(0),
	RXDONE_SIGNAL_BITRATE = BIT(1),
	RXDONE_SIGNAL_MCS = BIT(2),
	RXDONE_MY_BSS = BIT(3),
	RXDONE_CRYPTO_IV = BIT(4),
	RXDONE_CRYPTO_ICV = BIT(5),
	RXDONE_L2PAD = BIT(6),
};

/**
 * RXDONE_SIGNAL_MASK - Define to mask off all &rxdone_entry_desc_flags flags
 * except for the RXDONE_SIGNAL_* flags. This is useful to convert the dev_flags
 * from &rxdone_entry_desc to a signal value type.
 */
#define RXDONE_SIGNAL_MASK \
	( RXDONE_SIGNAL_PLCP | RXDONE_SIGNAL_BITRATE | RXDONE_SIGNAL_MCS )
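
/*
 * Illustrative usage sketch, assuming "rxdesc" points to a filled
 * &struct rxdone_entry_desc and the signal_is_* booleans are made up
 * for this example: the mask selects how the signal field of the
 * received frame must be interpreted.
 *
 *	switch (rxdesc->dev_flags & RXDONE_SIGNAL_MASK) {
 *	case RXDONE_SIGNAL_PLCP:
 *		signal_is_plcp = true;
 *		break;
 *	case RXDONE_SIGNAL_BITRATE:
 *		signal_is_bitrate = true;
 *		break;
 *	case RXDONE_SIGNAL_MCS:
 *		signal_is_mcs = true;
 *		break;
 *	}
 */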

/**
 * struct rxdone_entry_desc: RX Entry descriptor
 *
 * Summary of information that has been read from the RX frame descriptor.
 *
 * @timestamp: RX Timestamp
 * @signal: Signal of the received frame.
 * @rssi: RSSI of the received frame.
 * @size: Data size of the received frame.
 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
 * @rate_mode: Rate mode (See &enum rate_modulation).
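 * @enc_flags: mac80211 encoding flags (RX_ENC_FLAG_*) of the received frame.
 * @encoding: mac80211 RX encoding of the frame, see &enum mac80211_rx_encoding.
 * @bw: Bandwidth of the received frame, see &enum rate_info_bw.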
 * @cipher: Cipher type used during decryption.
 * @cipher_status: Decryption status.
 * @iv: IV/EIV data used during decryption.
 * @icv: ICV data used during decryption.
 */
struct rxdone_entry_desc {
	u64 timestamp;
	int signal;
	int rssi;
	int size;
	int flags;
	int dev_flags;
	u16 rate_mode;
	u16 enc_flags;
	enum mac80211_rx_encoding encoding;
	enum rate_info_bw bw;
	u8 cipher;
	u8 cipher_status;

	__le32 iv[2];
	__le32 icv;
};

/**
 * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
 *
 * Every txdone report has to contain the basic result of the
 * transmission, either &TXDONE_UNKNOWN, &TXDONE_SUCCESS or
 * &TXDONE_FAILURE. The flag &TXDONE_FALLBACK can be used in
 * conjunction with all of these flags but should only be set
 * if retries > 0. The flag &TXDONE_EXCESSIVE_RETRY can only be used
 * in conjunction with &TXDONE_FAILURE.
 *
 * @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
 * @TXDONE_SUCCESS: Frame was successfully sent.
 * @TXDONE_FALLBACK: Hardware used fallback rates for retries.
 * @TXDONE_FAILURE: Frame was not successfully sent.
 * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
 *	frame transmission failed due to excessive retries.
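 * @TXDONE_AMPDU: Frame was sent as part of an A-MPDU aggregate.
 * @TXDONE_NO_ACK_REQ: Frame was sent without requesting an ACK.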
 */
enum txdone_entry_desc_flags {
	TXDONE_UNKNOWN,
	TXDONE_SUCCESS,
	TXDONE_FALLBACK,
	TXDONE_FAILURE,
	TXDONE_EXCESSIVE_RETRY,
	TXDONE_AMPDU,
	TXDONE_NO_ACK_REQ,
};

/**
 * struct txdone_entry_desc: TX done entry descriptor
 *
 * Summary of information that has been read from the TX frame descriptor
 * after the device is done with transmission.
 *
 * @flags: TX done flags (See &enum txdone_entry_desc_flags).
 * @retry: Retry count.
 */
struct txdone_entry_desc {
	unsigned long flags;
	int retry;
};

/**
 * enum txentry_desc_flags: Status flags for TX entry descriptor
 *
 * @ENTRY_TXD_RTS_FRAME: This frame is an RTS frame.
 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
 * @ENTRY_TXD_GENERATE_SEQ: This frame requires a sequence counter.
 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first fragment of the frame.
 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
 * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
 * @ENTRY_TXD_ACK: An ACK is required for this frame.
 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
 * @ENTRY_TXD_ENCRYPT: This frame should be encrypted.
 * @ENTRY_TXD_ENCRYPT_PAIRWISE: Use pairwise key table (instead of shared).
 * @ENTRY_TXD_ENCRYPT_IV: Generate IV/EIV in hardware.
 * @ENTRY_TXD_ENCRYPT_MMIC: Generate MIC in hardware.
 * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU.
 * @ENTRY_TXD_HT_BW_40: Use 40MHz Bandwidth.
 * @ENTRY_TXD_HT_SHORT_GI: Use short GI.
 * @ENTRY_TXD_HT_MIMO_PS: The receiving STA is in dynamic SM PS mode.
 */
enum txentry_desc_flags {
	ENTRY_TXD_RTS_FRAME,
	ENTRY_TXD_CTS_FRAME,
	ENTRY_TXD_GENERATE_SEQ,
	ENTRY_TXD_FIRST_FRAGMENT,
	ENTRY_TXD_MORE_FRAG,
	ENTRY_TXD_REQ_TIMESTAMP,
	ENTRY_TXD_BURST,
	ENTRY_TXD_ACK,
	ENTRY_TXD_RETRY_MODE,
	ENTRY_TXD_ENCRYPT,
	ENTRY_TXD_ENCRYPT_PAIRWISE,
	ENTRY_TXD_ENCRYPT_IV,
	ENTRY_TXD_ENCRYPT_MMIC,
	ENTRY_TXD_HT_AMPDU,
	ENTRY_TXD_HT_BW_40,
	ENTRY_TXD_HT_SHORT_GI,
	ENTRY_TXD_HT_MIMO_PS,
};

/**
 * struct txentry_desc: TX Entry descriptor
 *
 * Summary of information for the frame descriptor before sending a TX frame.
 *
 * @flags: Descriptor flags (See &enum txentry_desc_flags).
 * @length: Length of the entire frame.
 * @header_length: Length of 802.11 header.
 * @length_high: PLCP length high word.
 * @length_low: PLCP length low word.
 * @signal: PLCP signal.
 * @service: PLCP service.
 * @mcs: MCS.
 * @stbc: Use Space Time Block Coding (only available for MCS rates < 8).
 * @ba_size: Size of the recipient's RX reorder buffer - 1.
 * @rate_mode: Rate mode (See &enum rate_modulation).
 * @mpdu_density: MPDU density.
 * @retry_limit: Max number of retries.
 * @ifs: IFS value.
 * @txop: IFS value for 11n capable chips.
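 * @wcid: Hardware WCID of the destination STA (11n capable chips).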
 * @cipher: Cipher type used for encryption.
 * @key_idx: Key index used for encryption.
 * @iv_offset: Position where IV should be inserted by hardware.
 * @iv_len: Length of IV data.
 */
struct txentry_desc {
	unsigned long flags;

	u16 length;
	u16 header_length;

	union {
		struct {
			u16 length_high;
			u16 length_low;
			u16 signal;
			u16 service;
			enum ifs ifs;
		} plcp;

		struct {
			u16 mcs;
			u8 stbc;
			u8 ba_size;
			u8 mpdu_density;
			enum txop txop;
			int wcid;
		} ht;
	} u;

	enum rate_modulation rate_mode;

	short retry_limit;

	enum cipher cipher;
	u16 key_idx;
	u16 iv_offset;
	u16 iv_len;
};

/**
 * enum queue_entry_flags: Status flags for queue entry
 *
 * @ENTRY_BCN_ASSIGNED: This entry has been assigned to an interface.
 *	As long as this bit is set, this entry may only be touched
 *	through the interface structure.
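 * @ENTRY_BCN_ENABLED: The assigned beacon entry is enabled and may
 *	currently be beaconed by the hardware.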
 * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
 *	transfer (either TX or RX depending on the queue). The entry should
 *	only be touched after the device has signaled it is done with it.
 * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
 *	for the signal to start sending.
 * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
 *	while transferring the data to the hardware. No TX status report will
 *	be expected from the hardware.
 * @ENTRY_DATA_STATUS_PENDING: The entry has been sent to the device and
 *	returned. It is now waiting for the status reporting before the
 *	entry can be reused again.
 */
enum queue_entry_flags {
	ENTRY_BCN_ASSIGNED,
	ENTRY_BCN_ENABLED,
	ENTRY_OWNER_DEVICE_DATA,
	ENTRY_DATA_PENDING,
	ENTRY_DATA_IO_FAILED,
	ENTRY_DATA_STATUS_PENDING,
};

/**
 * struct queue_entry: Entry inside the &struct data_queue
 *
 * @flags: Entry flags, see &enum queue_entry_flags.
 * @last_action: Timestamp of last change.
 * @queue: The data queue (&struct data_queue) to which this entry belongs.
 * @skb: The buffer which is currently being transmitted (for TX queue),
 *	or used to directly receive data in (for RX queue).
 * @entry_idx: The entry index number.
 * @priv_data: Private data belonging to this queue entry. The pointer
 *	points to data specific to a particular driver and queue type.
 */
struct queue_entry {
	unsigned long flags;
	unsigned long last_action;

	struct data_queue *queue;

	struct sk_buff *skb;

	unsigned int entry_idx;

	void *priv_data;
};

/**
 * enum queue_index: Queue index type
 *
 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
 *	owned by the hardware then the queue is considered to be full.
 * @Q_INDEX_DMA_DONE: Index pointer for the next entry which will have been
 *	transferred to the hardware.
 * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
 *	the hardware and for which we need to run the txdone handler. If this
 *	entry is not owned by the hardware the queue is considered to be empty.
 * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
 *	of the index array.
 */
enum queue_index {
	Q_INDEX,
	Q_INDEX_DMA_DONE,
	Q_INDEX_DONE,
	Q_INDEX_MAX,
};
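
/*
 * Illustrative sketch of how the indexes relate (derived from the
 * descriptions above, not a normative definition): the three indexes
 * chase each other around the ring. Entries between Q_INDEX_DONE and
 * Q_INDEX_DMA_DONE have been transferred but still await status
 * reporting, entries between Q_INDEX_DMA_DONE and Q_INDEX still await
 * transfer to the hardware, and the remaining entries are free to be
 * filled with new frames.
 */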

/**
 * enum data_queue_flags: Status flags for data queues
 *
 * @QUEUE_STARTED: The queue has been started. For RX queues this means the
 *	device might be DMA'ing skbuffers. TX queues will accept skbuffers to
 *	be transmitted and beacon queues will start beaconing the configured
 *	beacons.
 * @QUEUE_PAUSED: The queue has been started but is currently paused.
 *	When this bit is set, the queue has been stopped in mac80211,
 *	preventing new frames from being enqueued. However, a few frames
 *	might still appear shortly after the pausing.
 */
enum data_queue_flags {
	QUEUE_STARTED,
	QUEUE_PAUSED,
};

/**
 * struct data_queue: Data queue
 *
 * @rt2x00dev: Pointer to the main &struct rt2x00_dev to which this queue belongs.
 * @entries: Base address of the &struct queue_entry which are
 *	part of this queue.
 * @qid: The queue identification, see &enum data_queue_qid.
 * @flags: Queue flags, see &enum data_queue_flags.
 * @status_lock: The mutex for protecting the start/stop/flush
 *	handling on this queue.
 * @tx_lock: Spinlock to serialize tx operations on this queue.
 * @index_lock: Spinlock to protect index handling. Whenever the @index
 *	array needs to be changed this lock should be grabbed to prevent
 *	index corruption due to concurrency.
 * @count: Number of frames handled in the queue.
 * @limit: Maximum number of entries in the queue.
 * @threshold: Minimum number of free entries before the queue is kicked by force.
 * @length: Number of frames in the queue.
 * @index: Index pointers to entry positions in the queue,
 *	use &enum queue_index to get a specific index field.
 * @wd_count: Watchdog counter: the number of consecutive checks during
 *	which the queue made no progress.
 * @wd_idx: Queue index saved by the watchdog for the next comparison.
 * @txop: Maximum burst time.
 * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
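 * @winfo_size: Hardware TX winfo (TXWI) size for the data in this queue.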
 * @priv_size: Size of per-queue_entry private data.
 * @usb_endpoint: Device endpoint used for communication (USB only)
 * @usb_maxpacket: Max packet size for given endpoint (USB only)
 */
struct data_queue {
	struct rt2x00_dev *rt2x00dev;
	struct queue_entry *entries;

	enum data_queue_qid qid;
	unsigned long flags;

	struct mutex status_lock;
	spinlock_t tx_lock;
	spinlock_t index_lock;

	unsigned int count;
	unsigned short limit;
	unsigned short threshold;
	unsigned short length;
	unsigned short index[Q_INDEX_MAX];

	unsigned short wd_count;
	unsigned int wd_idx;

	unsigned short txop;
	unsigned short aifs;
	unsigned short cw_min;
	unsigned short cw_max;

	unsigned short data_size;
	unsigned char desc_size;
	unsigned char winfo_size;
	unsigned short priv_size;

	unsigned short usb_endpoint;
	unsigned short usb_maxpacket;
};

/**
 * queue_end - Return pointer past the last queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base rx pointer and the maximum number of available queues,
 * this macro will return the address of 1 position beyond the end of the
 * queues array.
 */
#define queue_end(__dev) \
	&(__dev)->rx[(__dev)->data_queues]

/**
 * tx_queue_end - Return pointer past the last TX queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base tx pointer and the maximum number of available TX
 * queues, this macro will return the address of 1 position beyond
 * the end of the TX queue array.
 */
#define tx_queue_end(__dev) \
	&(__dev)->tx[(__dev)->ops->tx_queues]

/**
 * queue_next - Return pointer to next queue in list (HELPER MACRO).
 * @__queue: Current queue for which we need the next queue
 *
 * Using the current queue address we take the address directly
 * after the queue to take the next queue. Note that this macro
 * should be used carefully since it does not protect against
 * moving past the end of the list. (See macros &queue_end and
 * &tx_queue_end for determining the end of the queue).
 */
#define queue_next(__queue) \
	&(__queue)[1]

/**
 * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
 * @__entry: Pointer where the current queue entry will be stored in.
 * @__start: Start queue pointer.
 * @__end: End queue pointer.
 *
 * This macro will loop through all queues between &__start and &__end.
 */
#define queue_loop(__entry, __start, __end)			\
	for ((__entry) = (__start);				\
	     prefetch(queue_next(__entry)), (__entry) != (__end);\
	     (__entry) = queue_next(__entry))

/**
 * queue_for_each - Loop through all queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all available queues.
 */
#define queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->rx, queue_end(__dev))

/**
 * tx_queue_for_each - Loop through the TX queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues excluding
 * the Beacon and Atim queues.
 */
#define tx_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, tx_queue_end(__dev))

/**
 * txall_queue_for_each - Loop through all TX related queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues including
 * the Beacon and Atim queues.
 */
#define txall_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, queue_end(__dev))
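
/*
 * Illustrative usage sketch, assuming "rt2x00dev" is a valid
 * &struct rt2x00_dev pointer: count the frames currently queued on the
 * regular TX queues (beacon and atim queues excluded).
 *
 *	struct data_queue *queue;
 *	unsigned int queued = 0;
 *
 *	tx_queue_for_each(rt2x00dev, queue)
 *		queued += queue->length;
 */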

/**
 * rt2x00queue_for_each_entry - Loop through all entries in the queue
 * @queue: Pointer to &struct data_queue
 * @start: &enum queue_index Start index
 * @end: &enum queue_index End index
 * @data: Data to pass to the callback function
 * @fn: The function to call for each &struct queue_entry
 *
 * This will walk through all entries in the queue, in chronological
 * order. This means it will start at the current @start index
 * and will walk through the queue until it reaches the @end index.
 *
 * If fn returns true for an entry rt2x00queue_for_each_entry will stop
 * processing and return true as well.
 */
bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data));
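
/*
 * Illustrative usage sketch: the callback below is made up for this
 * example (it does not exist in the driver) and reports whether any
 * entry between the DONE index and the current index is still waiting
 * to be handed to the hardware.
 *
 *	static bool example_entry_pending(struct queue_entry *entry,
 *					  void *data)
 *	{
 *		return test_bit(ENTRY_DATA_PENDING, &entry->flags);
 *	}
 *
 *	bool pending = rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
 *						  Q_INDEX, NULL,
 *						  example_entry_pending);
 */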

/**
 * rt2x00queue_empty - Check if the queue is empty.
 * @queue: Queue to check if empty.
 */
static inline int rt2x00queue_empty(struct data_queue *queue)
{
	return queue->length == 0;
}

/**
 * rt2x00queue_full - Check if the queue is full.
 * @queue: Queue to check if full.
 */
static inline int rt2x00queue_full(struct data_queue *queue)
{
	return queue->length == queue->limit;
}

/**
 * rt2x00queue_available - Check the number of available entries in the queue.
 * @queue: Queue to check.
 */
static inline int rt2x00queue_available(struct data_queue *queue)
{
	return queue->limit - queue->length;
}

/**
 * rt2x00queue_threshold - Check if the queue is below threshold
 * @queue: Queue to check.
 */
static inline int rt2x00queue_threshold(struct data_queue *queue)
{
	return rt2x00queue_available(queue) < queue->threshold;
}

/**
 * rt2x00queue_dma_timeout - Check if a timeout occurred for DMA transfers
 * @entry: Queue entry to check.
 */
static inline int rt2x00queue_dma_timeout(struct queue_entry *entry)
{
	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return false;
	return time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
}

/**
 * _rt2x00_desc_read - Read a word from the hardware descriptor.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be read.
 */
static inline __le32 _rt2x00_desc_read(__le32 *desc, const u8 word)
{
	return desc[word];
}

/**
 * rt2x00_desc_read - Read a word from the hardware descriptor, this
 * function will take care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be read.
 */
static inline u32 rt2x00_desc_read(__le32 *desc, const u8 word)
{
	return le32_to_cpu(_rt2x00_desc_read(desc, word));
}

/**
 * _rt2x00_desc_write - Write an already little-endian word to the
 * hardware descriptor, without any byte order conversion.
 * @desc: Base descriptor address
 * @word: Word index where the descriptor should be written.
 * @value: Value that should be written into the descriptor.
 */
static inline void _rt2x00_desc_write(__le32 *desc, const u8 word, __le32 value)
{
	desc[word] = value;
}

/**
 * rt2x00_desc_write - Write a word to the hardware descriptor, this
 * function will take care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index where the descriptor should be written.
 * @value: Value that should be written into the descriptor.
 */
static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
{
	_rt2x00_desc_write(desc, word, cpu_to_le32(value));
}
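
/*
 * Illustrative read-modify-write sketch: fetch a descriptor word in CPU
 * byte order, modify it and write it back in little-endian order. The
 * names "txd" and "SOME_FLAG", as well as word index 0, are assumptions
 * made up for this example.
 *
 *	u32 word = rt2x00_desc_read(txd, 0);
 *
 *	word |= SOME_FLAG;
 *	rt2x00_desc_write(txd, 0, word);
 */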

#endif /* RT2X00QUEUE_H */