/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "dpaa_sys.h"

#include <soc/fsl/qman.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>

#if defined(CONFIG_FSL_PAMU)
#include <asm/fsl_pamu_stash.h>
#endif

struct qm_mcr_querywq {
	u8 verb;
	u8 result;
	u16 channel_wq;		/* ignores wq (3 lsbits): _res[0-2] */
	u8 __reserved[28];
	u32 wq_len[8];
} __packed;

static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
{
	return wq->channel_wq >> 3;
}

struct __qm_mcr_querycongestion {
	u32 state[8];
};

/* "Query Congestion Group State" */
struct qm_mcr_querycongestion {
	u8 verb;
	u8 result;
	u8 __reserved[30];
	/* Access this struct using qman_cgrs_get() */
	struct __qm_mcr_querycongestion state;
} __packed;

/* "Query CGR" */
struct qm_mcr_querycgr {
	u8 verb;
	u8 result;
	u16 __reserved1;
	struct __qm_mc_cgr cgr;		/* CGR fields */
	u8 __reserved2[6];
	u8 i_bcnt_hi;			/* high 8-bits of 40-bit "Instant" */
	__be32 i_bcnt_lo;		/* low 32-bits of 40-bit */
	u8 __reserved3[3];
	u8 a_bcnt_hi;			/* high 8-bits of 40-bit "Average" */
	__be32 a_bcnt_lo;		/* low 32-bits of 40-bit */
	__be32 cscn_targ_swp[4];
} __packed;

static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
{
	return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
}

static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
{
	return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
}
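
/*
 * Worked example (illustrative values only, not taken from hardware): the
 * helpers above splice the split byte counts back together. With
 * i_bcnt_hi = 0x02 and i_bcnt_lo holding the big-endian encoding of
 * 0x00000040, qm_mcr_querycgr_i_get64() returns
 * (0x02ULL << 32) | 0x40 = 0x0000000200000040, i.e. the full 40-bit
 * instantaneous byte count for the CGR.
 */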

/* Congestion Groups */

/*
 * This wrapper represents a bit-array for the state of the 256 QMan congestion
 * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
 * those that don't concern us. We harness the structure and accessor details
 * already used in the management command to query congestion groups.
 */
#define CGR_BITS_PER_WORD 5
#define CGR_WORD(x)	((x) >> CGR_BITS_PER_WORD)
#define CGR_BIT(x)	(BIT(31) >> ((x) & 0x1f))
#define CGR_NUM	(sizeof(struct __qm_mcr_querycongestion) << 3)

struct qman_cgrs {
	struct __qm_mcr_querycongestion q;
};

static inline void qman_cgrs_init(struct qman_cgrs *c)
{
	memset(c, 0, sizeof(*c));
}

static inline void qman_cgrs_fill(struct qman_cgrs *c)
{
	memset(c, 0xff, sizeof(*c));
}

static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
{
	return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
}

static inline void qman_cgrs_cp(struct qman_cgrs *dest,
				const struct qman_cgrs *src)
{
	*dest = *src;
}

static inline void qman_cgrs_and(struct qman_cgrs *dest,
				 const struct qman_cgrs *a,
				 const struct qman_cgrs *b)
{
	int i;
	u32 *_d = dest->q.state;
	const u32 *_a = a->q.state;
	const u32 *_b = b->q.state;

	for (i = 0; i < 8; i++)
		*_d++ = *_a++ & *_b++;
}

static inline void qman_cgrs_xor(struct qman_cgrs *dest,
				 const struct qman_cgrs *a,
				 const struct qman_cgrs *b)
{
	int i;
	u32 *_d = dest->q.state;
	const u32 *_a = a->q.state;
	const u32 *_b = b->q.state;

	for (i = 0; i < 8; i++)
		*_d++ = *_a++ ^ *_b++;
}
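
/*
 * Illustrative sketch (not part of the driver API; 'state', 'interest',
 * 'cgrid' and handle_congestion() are hypothetical caller-owned names): the
 * set helpers above can be combined to test whether a given CGR is both of
 * interest and congested, e.g. once a "Query Congestion Group State" response
 * has been copied into 'state':
 *
 *	struct qman_cgrs masked;
 *
 *	qman_cgrs_and(&masked, &state, &interest);
 *	if (qman_cgrs_get(&masked, cgrid))
 *		handle_congestion(cgrid);
 */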

void qman_init_cgr_all(void);

struct qm_portal_config {
	/*
	 * Corenet portal addresses;
	 * [0]==cache-enabled, [1]==cache-inhibited.
	 */
	void __iomem *addr_virt[2];
	struct device *dev;
	struct iommu_domain *iommu_domain;
	/* Allow these to be joined in lists */
	struct list_head list;
	/* User-visible portal configuration settings */
	/* portal is affined to this cpu */
	int cpu;
	/* portal interrupt line */
	int irq;
	/*
	 * the portal's dedicated channel id, used when initialising
	 * frame queues to target this portal when scheduled
	 */
	u16 channel;
	/*
	 * mask of pool channels this portal has dequeue access to
	 * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
	 */
	u32 pools;
};

/* Revision info (for errata and feature handling) */
#define QMAN_REV11 0x0101
#define QMAN_REV12 0x0102
#define QMAN_REV20 0x0200
#define QMAN_REV30 0x0300
#define QMAN_REV31 0x0301
extern u16 qman_ip_rev;		/* 0 if uninitialised, otherwise QMAN_REVx */

#define QM_FQID_RANGE_START 1	/* FQID 0 reserved for internal use */
extern struct gen_pool *qm_fqalloc;	/* FQID allocator */
extern struct gen_pool *qm_qpalloc;	/* pool-channel allocator */
extern struct gen_pool *qm_cgralloc;	/* CGR ID allocator */
u32 qm_get_pools_sdqcr(void);

int qman_wq_alloc(void);
void qman_liodn_fixup(u16 channel);
void qman_set_sdest(u16 channel, unsigned int cpu_idx);

struct qman_portal *qman_create_affine_portal(
			const struct qm_portal_config *config,
			const struct qman_cgrs *cgrs);
const struct qm_portal_config *qman_destroy_affine_portal(void);

/*
 * qman_query_fq - Queries FQD fields (via h/w query command)
 * @fq: the frame queue object to be queried
 * @fqd: storage for the queried FQD fields
 */
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);

int qman_alloc_fq_table(u32 num_fqids);

/* QMan s/w corenet portal, low-level i/face */

/*
 * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
 * dequeue TYPE. Choose TOKEN (8-bit).
 * If SOURCE == CHANNELS,
 *   Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
 *   You can choose DEDICATED_PRECEDENCE if the portal channel should have
 *   priority.
 * If SOURCE == SPECIFICWQ,
 *   Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
 *   channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
 *   work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
 *   same value.
 * (An illustrative composition sketch appears at the end of this header.)
 */
#define QM_SDQCR_SOURCE_CHANNELS	0x0
#define QM_SDQCR_SOURCE_SPECIFICWQ	0x40000000
#define QM_SDQCR_COUNT_EXACT1		0x0
#define QM_SDQCR_COUNT_UPTO3		0x20000000
#define QM_SDQCR_DEDICATED_PRECEDENCE	0x10000000
#define QM_SDQCR_TYPE_MASK		0x03000000
#define QM_SDQCR_TYPE_NULL		0x0
#define QM_SDQCR_TYPE_PRIO_QOS		0x01000000
#define QM_SDQCR_TYPE_ACTIVE_QOS	0x02000000
#define QM_SDQCR_TYPE_ACTIVE		0x03000000
#define QM_SDQCR_TOKEN_MASK		0x00ff0000
#define QM_SDQCR_TOKEN_SET(v)		(((v) & 0xff) << 16)
#define QM_SDQCR_TOKEN_GET(v)		(((v) >> 16) & 0xff)
#define QM_SDQCR_CHANNELS_DEDICATED	0x00008000
#define QM_SDQCR_SPECIFICWQ_MASK	0x000000f7
#define QM_SDQCR_SPECIFICWQ_DEDICATED	0x00000000
#define QM_SDQCR_SPECIFICWQ_POOL(n)	((n) << 4)
#define QM_SDQCR_SPECIFICWQ_WQ(n)	(n)

/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
#define QM_VDQCR_FQID_MASK		0x00ffffff
#define QM_VDQCR_FQID(n)		((n) & QM_VDQCR_FQID_MASK)

/*
 * Used by all portal interrupt registers except 'inhibit'
 * Channels with frame availability
 */
#define QM_PIRQ_DQAVAIL	0x0000ffff

/* The DQAVAIL interrupt fields break down into these bits; */
#define QM_DQAVAIL_PORTAL	0x8000		/* Portal channel */
#define QM_DQAVAIL_POOL(n)	(0x8000 >> (n))	/* Pool channel, n==[1..15] */
#define QM_DQAVAIL_MASK		0xffff
/* This mask contains all the "irqsource" bits visible to API users */
#define QM_PIRQ_VISIBLE	(QM_PIRQ_SLOW | QM_PIRQ_DQRI)

extern struct qman_portal *affine_portals[NR_CPUS];
extern struct qman_portal *qman_dma_portal;
const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal);
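
/*
 * Illustrative sketch (assumed usage, not lifted from this driver; the token
 * 0xab and pool channel 3 are arbitrary example values): a static dequeue
 * command taking up to three frames per dequeue from the portal's dedicated
 * channel and pool channel 3, with the PRIO_QOS dequeue type, could be
 * composed from the macros above as:
 *
 *	u32 sdqcr = QM_SDQCR_SOURCE_CHANNELS |
 *		    QM_SDQCR_COUNT_UPTO3 |
 *		    QM_SDQCR_TYPE_PRIO_QOS |
 *		    QM_SDQCR_TOKEN_SET(0xab) |
 *		    QM_SDQCR_CHANNELS_DEDICATED |
 *		    QM_SDQCR_CHANNELS_POOL(3);
 *
 * QM_SDQCR_CHANNELS_POOL() is the pool-channel bitmask helper referred to in
 * the qm_portal_config comment; QM_SDQCR_DEDICATED_PRECEDENCE could also be
 * OR'd in to give the dedicated channel priority over the pool channel.
 */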