/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_

/*
 * Completion queue descriptor types
 */
enum cq_desc_types {
	CQ_DESC_TYPE_WQ_ENET = 0,
	CQ_DESC_TYPE_DESC_COPY = 1,
	CQ_DESC_TYPE_WQ_EXCH = 2,
	CQ_DESC_TYPE_RQ_ENET = 3,
	CQ_DESC_TYPE_RQ_FCP = 4,
	CQ_DESC_TYPE_IOMMU_MISS = 5,
	CQ_DESC_TYPE_SGL = 6,
	CQ_DESC_TYPE_CLASSIFIER = 7,
	CQ_DESC_TYPE_TEST = 127,
};

/* Completion queue descriptor: 16B
 *
 * All completion queues have this basic layout. The
 * type_specific area is unique for each completion
 * queue type.
 */
struct cq_desc {
	__le16 completed_index;
	__le16 q_number;
	u8 type_specific[11];
	u8 type_color;
};
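
/*
 * Sketch, not part of the original header: the comment above documents the
 * descriptor as 16 bytes, so a consumer could pin the layout down with a
 * compile-time check (assumes a C11 toolchain providing _Static_assert).
 */
_Static_assert(sizeof(struct cq_desc) == 16,
	       "struct cq_desc must be exactly 16 bytes");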

#define CQ_DESC_TYPE_BITS 4
#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK 1
#define CQ_DESC_COLOR_SHIFT 7
#define CQ_DESC_COLOR_MASK_NOSHIFT 0x80
#define CQ_DESC_Q_NUM_BITS 10
#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
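
/*
 * Worked example (illustration only): encoding type = CQ_DESC_TYPE_RQ_ENET
 * (3) with color = 1 gives type_color = (3 & 0xf) | (1 << 7) = 0x83, i.e.
 * the type occupies bits 0-3 and the color occupies bit 7. q_number is
 * truncated to 10 bits (0-1023) and completed_index to 12 bits (0-4095)
 * by their respective masks.
 */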

static inline void cq_color_enc(struct cq_desc *desc, const u8 color)
{
	if (color)
		desc->type_color |= (1 << CQ_DESC_COLOR_SHIFT);
	else
		desc->type_color &= ~(1 << CQ_DESC_COLOR_SHIFT);
}

static inline void cq_desc_enc(struct cq_desc *desc,
	const u8 type, const u8 color, const u16 q_number,
	const u16 completed_index)
{
	desc->type_color = (type & CQ_DESC_TYPE_MASK) |
		((color & CQ_DESC_COLOR_MASK) << CQ_DESC_COLOR_SHIFT);
	desc->q_number = cpu_to_le16(q_number & CQ_DESC_Q_NUM_MASK);
	desc->completed_index = cpu_to_le16(completed_index &
		CQ_DESC_COMP_NDX_MASK);
}

static inline void cq_desc_dec(const struct cq_desc *desc_arg,
	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
	const struct cq_desc *desc = desc_arg;
	const u8 type_color = desc->type_color;

	*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;

	/*
	 * Make sure color bit is read from desc *before* other fields
	 * are read from desc. Hardware guarantees color bit is last
	 * bit (byte) written. Adding the rmb() prevents the compiler
	 * and/or CPU from reordering the reads which would potentially
	 * result in reading stale values.
	 */

	rmb();

	*type = type_color & CQ_DESC_TYPE_MASK;
	*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
	*completed_index = le16_to_cpu(desc->completed_index) &
		CQ_DESC_COMP_NDX_MASK;
}
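
/*
 * Usage sketch, not part of the original header: a typical consumer walks
 * the completion ring and treats a descriptor as new only when its color
 * bit differs from the color it last observed, flipping the expected color
 * each time the ring wraps. All names below (ring, ring_size, to_clean,
 * last_color) are hypothetical and only illustrate how cq_desc_dec()
 * might be used.
 */
static inline unsigned int example_cq_service(const struct cq_desc *ring,
	unsigned int ring_size, unsigned int *to_clean, u8 *last_color)
{
	unsigned int work_done = 0;
	u16 q_number, completed_index;
	u8 type, color;

	for (;;) {
		cq_desc_dec(&ring[*to_clean], &type, &color,
			&q_number, &completed_index);

		/* Same color as the previous pass: no new completion yet */
		if (color == *last_color)
			break;

		/* ... dispatch on type / completed_index here ... */
		work_done++;

		if (++(*to_clean) == ring_size) {
			*to_clean = 0;
			*last_color = *last_color ? 0 : 1;
		}
	}

	return work_done;
}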

/*
 * Decode only the color bit; the volatile access forces a fresh read of
 * the descriptor byte on each poll.
 */
static inline void cq_color_dec(const struct cq_desc *desc_arg, u8 *color)
{
	volatile const struct cq_desc *desc = desc_arg;

	*color = (desc->type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
}

#endif /* _CQ_DESC_H_ */