1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later 2 /* 3 * Copyright 2008 - 2015 Freescale Semiconductor Inc. 4 * Copyright 2020 NXP 5 */ 6 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 9 #include <linux/fsl/guts.h> 10 #include <linux/platform_device.h> 11 #include <linux/slab.h> 12 #include <linux/delay.h> 13 #include <linux/module.h> 14 #include <linux/of_platform.h> 15 #include <linux/clk.h> 16 #include <linux/of_address.h> 17 #include <linux/of_irq.h> 18 #include <linux/interrupt.h> 19 #include <linux/libfdt_env.h> 20 21 #include "fman.h" 22 #include "fman_muram.h" 23 #include "fman_keygen.h" 24 25 /* General defines */ 26 #define FMAN_LIODN_TBL 64 /* size of LIODN table */ 27 #define MAX_NUM_OF_MACS 10 28 #define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4 29 #define BASE_RX_PORTID 0x08 30 #define BASE_TX_PORTID 0x28 31 32 /* Modules registers offsets */ 33 #define BMI_OFFSET 0x00080000 34 #define QMI_OFFSET 0x00080400 35 #define KG_OFFSET 0x000C1000 36 #define DMA_OFFSET 0x000C2000 37 #define FPM_OFFSET 0x000C3000 38 #define IMEM_OFFSET 0x000C4000 39 #define HWP_OFFSET 0x000C7000 40 #define CGP_OFFSET 0x000DB000 41 42 /* Exceptions bit map */ 43 #define EX_DMA_BUS_ERROR 0x80000000 44 #define EX_DMA_READ_ECC 0x40000000 45 #define EX_DMA_SYSTEM_WRITE_ECC 0x20000000 46 #define EX_DMA_FM_WRITE_ECC 0x10000000 47 #define EX_FPM_STALL_ON_TASKS 0x08000000 48 #define EX_FPM_SINGLE_ECC 0x04000000 49 #define EX_FPM_DOUBLE_ECC 0x02000000 50 #define EX_QMI_SINGLE_ECC 0x01000000 51 #define EX_QMI_DEQ_FROM_UNKNOWN_PORTID 0x00800000 52 #define EX_QMI_DOUBLE_ECC 0x00400000 53 #define EX_BMI_LIST_RAM_ECC 0x00200000 54 #define EX_BMI_STORAGE_PROFILE_ECC 0x00100000 55 #define EX_BMI_STATISTICS_RAM_ECC 0x00080000 56 #define EX_IRAM_ECC 0x00040000 57 #define EX_MURAM_ECC 0x00020000 58 #define EX_BMI_DISPATCH_RAM_ECC 0x00010000 59 #define EX_DMA_SINGLE_PORT_ECC 0x00008000 60 61 /* DMA defines */ 62 /* masks */ 63 #define DMA_MODE_BER 0x00200000 64 #define DMA_MODE_ECC 0x00000020 65 #define DMA_MODE_SECURE_PROT 0x00000800 66 #define DMA_MODE_AXI_DBG_MASK 0x0F000000 67 68 #define DMA_TRANSFER_PORTID_MASK 0xFF000000 69 #define DMA_TRANSFER_TNUM_MASK 0x00FF0000 70 #define DMA_TRANSFER_LIODN_MASK 0x00000FFF 71 72 #define DMA_STATUS_BUS_ERR 0x08000000 73 #define DMA_STATUS_READ_ECC 0x04000000 74 #define DMA_STATUS_SYSTEM_WRITE_ECC 0x02000000 75 #define DMA_STATUS_FM_WRITE_ECC 0x01000000 76 #define DMA_STATUS_FM_SPDAT_ECC 0x00080000 77 78 #define DMA_MODE_CACHE_OR_SHIFT 30 79 #define DMA_MODE_AXI_DBG_SHIFT 24 80 #define DMA_MODE_CEN_SHIFT 13 81 #define DMA_MODE_CEN_MASK 0x00000007 82 #define DMA_MODE_DBG_SHIFT 7 83 #define DMA_MODE_AID_MODE_SHIFT 4 84 85 #define DMA_THRESH_COMMQ_SHIFT 24 86 #define DMA_THRESH_READ_INT_BUF_SHIFT 16 87 #define DMA_THRESH_READ_INT_BUF_MASK 0x0000003f 88 #define DMA_THRESH_WRITE_INT_BUF_MASK 0x0000003f 89 90 #define DMA_TRANSFER_PORTID_SHIFT 24 91 #define DMA_TRANSFER_TNUM_SHIFT 16 92 93 #define DMA_CAM_SIZEOF_ENTRY 0x40 94 #define DMA_CAM_UNITS 8 95 96 #define DMA_LIODN_SHIFT 16 97 #define DMA_LIODN_BASE_MASK 0x00000FFF 98 99 /* FPM defines */ 100 #define FPM_EV_MASK_DOUBLE_ECC 0x80000000 101 #define FPM_EV_MASK_STALL 0x40000000 102 #define FPM_EV_MASK_SINGLE_ECC 0x20000000 103 #define FPM_EV_MASK_RELEASE_FM 0x00010000 104 #define FPM_EV_MASK_DOUBLE_ECC_EN 0x00008000 105 #define FPM_EV_MASK_STALL_EN 0x00004000 106 #define FPM_EV_MASK_SINGLE_ECC_EN 0x00002000 107 #define FPM_EV_MASK_EXTERNAL_HALT 0x00000008 108 #define FPM_EV_MASK_ECC_ERR_HALT 0x00000004 109 110 #define FPM_RAM_MURAM_ECC 0x00008000 111 #define FPM_RAM_IRAM_ECC 0x00004000 112 #define FPM_IRAM_ECC_ERR_EX_EN 0x00020000 113 #define FPM_MURAM_ECC_ERR_EX_EN 0x00040000 114 #define FPM_RAM_IRAM_ECC_EN 0x40000000 115 #define FPM_RAM_RAMS_ECC_EN 0x80000000 116 #define FPM_RAM_RAMS_ECC_EN_SRC_SEL 0x08000000 117 118 #define FPM_REV1_MAJOR_MASK 0x0000FF00 119 #define FPM_REV1_MINOR_MASK 0x000000FF 120 121 #define FPM_DISP_LIMIT_SHIFT 24 122 123 #define FPM_PRT_FM_CTL1 0x00000001 124 #define FPM_PRT_FM_CTL2 0x00000002 125 #define FPM_PORT_FM_CTL_PORTID_SHIFT 24 126 #define FPM_PRC_ORA_FM_CTL_SEL_SHIFT 16 127 128 #define FPM_THR1_PRS_SHIFT 24 129 #define FPM_THR1_KG_SHIFT 16 130 #define FPM_THR1_PLCR_SHIFT 8 131 #define FPM_THR1_BMI_SHIFT 0 132 133 #define FPM_THR2_QMI_ENQ_SHIFT 24 134 #define FPM_THR2_QMI_DEQ_SHIFT 0 135 #define FPM_THR2_FM_CTL1_SHIFT 16 136 #define FPM_THR2_FM_CTL2_SHIFT 8 137 138 #define FPM_EV_MASK_CAT_ERR_SHIFT 1 139 #define FPM_EV_MASK_DMA_ERR_SHIFT 0 140 141 #define FPM_REV1_MAJOR_SHIFT 8 142 143 #define FPM_RSTC_FM_RESET 0x80000000 144 #define FPM_RSTC_MAC0_RESET 0x40000000 145 #define FPM_RSTC_MAC1_RESET 0x20000000 146 #define FPM_RSTC_MAC2_RESET 0x10000000 147 #define FPM_RSTC_MAC3_RESET 0x08000000 148 #define FPM_RSTC_MAC8_RESET 0x04000000 149 #define FPM_RSTC_MAC4_RESET 0x02000000 150 #define FPM_RSTC_MAC5_RESET 0x01000000 151 #define FPM_RSTC_MAC6_RESET 0x00800000 152 #define FPM_RSTC_MAC7_RESET 0x00400000 153 #define FPM_RSTC_MAC9_RESET 0x00200000 154 155 #define FPM_TS_INT_SHIFT 16 156 #define FPM_TS_CTL_EN 0x80000000 157 158 /* BMI defines */ 159 #define BMI_INIT_START 0x80000000 160 #define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC 0x80000000 161 #define BMI_ERR_INTR_EN_LIST_RAM_ECC 0x40000000 162 #define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC 0x20000000 163 #define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC 0x10000000 164 #define BMI_NUM_OF_TASKS_MASK 0x3F000000 165 #define BMI_NUM_OF_EXTRA_TASKS_MASK 0x000F0000 166 #define BMI_NUM_OF_DMAS_MASK 0x00000F00 167 #define BMI_NUM_OF_EXTRA_DMAS_MASK 0x0000000F 168 #define BMI_FIFO_SIZE_MASK 0x000003FF 169 #define BMI_EXTRA_FIFO_SIZE_MASK 0x03FF0000 170 #define BMI_CFG2_DMAS_MASK 0x0000003F 171 #define BMI_CFG2_TASKS_MASK 0x0000003F 172 173 #define BMI_CFG2_TASKS_SHIFT 16 174 #define BMI_CFG2_DMAS_SHIFT 0 175 #define BMI_CFG1_FIFO_SIZE_SHIFT 16 176 #define BMI_NUM_OF_TASKS_SHIFT 24 177 #define BMI_EXTRA_NUM_OF_TASKS_SHIFT 16 178 #define BMI_NUM_OF_DMAS_SHIFT 8 179 #define BMI_EXTRA_NUM_OF_DMAS_SHIFT 0 180 181 #define BMI_FIFO_ALIGN 0x100 182 183 #define BMI_EXTRA_FIFO_SIZE_SHIFT 16 184 185 /* QMI defines */ 186 #define QMI_CFG_ENQ_EN 0x80000000 187 #define QMI_CFG_DEQ_EN 0x40000000 188 #define QMI_CFG_EN_COUNTERS 0x10000000 189 #define QMI_CFG_DEQ_MASK 0x0000003F 190 #define QMI_CFG_ENQ_MASK 0x00003F00 191 #define QMI_CFG_ENQ_SHIFT 8 192 193 #define QMI_ERR_INTR_EN_DOUBLE_ECC 0x80000000 194 #define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000 195 #define QMI_INTR_EN_SINGLE_ECC 0x80000000 196 197 #define QMI_GS_HALT_NOT_BUSY 0x00000002 198 199 /* HWP defines */ 200 #define HWP_RPIMAC_PEN 0x00000001 201 202 /* IRAM defines */ 203 #define IRAM_IADD_AIE 0x80000000 204 #define IRAM_READY 0x80000000 205 206 /* Default values */ 207 #define DEFAULT_CATASTROPHIC_ERR 0 208 #define DEFAULT_DMA_ERR 0 209 #define DEFAULT_AID_MODE FMAN_DMA_AID_OUT_TNUM 210 #define DEFAULT_DMA_COMM_Q_LOW 0x2A 211 #define DEFAULT_DMA_COMM_Q_HIGH 0x3F 212 #define DEFAULT_CACHE_OVERRIDE 0 213 #define DEFAULT_DMA_CAM_NUM_OF_ENTRIES 64 214 #define DEFAULT_DMA_DBG_CNT_MODE 0 215 #define DEFAULT_DMA_SOS_EMERGENCY 0 216 #define DEFAULT_DMA_WATCHDOG 0 217 #define DEFAULT_DISP_LIMIT 0 218 #define DEFAULT_PRS_DISP_TH 16 219 #define DEFAULT_PLCR_DISP_TH 16 220 #define DEFAULT_KG_DISP_TH 16 221 #define DEFAULT_BMI_DISP_TH 16 222 #define DEFAULT_QMI_ENQ_DISP_TH 16 223 #define DEFAULT_QMI_DEQ_DISP_TH 16 224 #define DEFAULT_FM_CTL1_DISP_TH 16 225 #define DEFAULT_FM_CTL2_DISP_TH 16 226 227 #define DFLT_AXI_DBG_NUM_OF_BEATS 1 228 229 #define DFLT_DMA_READ_INT_BUF_LOW(dma_thresh_max_buf) \ 230 ((dma_thresh_max_buf + 1) / 2) 231 #define DFLT_DMA_READ_INT_BUF_HIGH(dma_thresh_max_buf) \ 232 ((dma_thresh_max_buf + 1) * 3 / 4) 233 #define DFLT_DMA_WRITE_INT_BUF_LOW(dma_thresh_max_buf) \ 234 ((dma_thresh_max_buf + 1) / 2) 235 #define DFLT_DMA_WRITE_INT_BUF_HIGH(dma_thresh_max_buf)\ 236 ((dma_thresh_max_buf + 1) * 3 / 4) 237 238 #define DMA_COMM_Q_LOW_FMAN_V3 0x2A 239 #define DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq) \ 240 ((dma_thresh_max_commq + 1) / 2) 241 #define DFLT_DMA_COMM_Q_LOW(major, dma_thresh_max_commq) \ 242 ((major == 6) ? DMA_COMM_Q_LOW_FMAN_V3 : \ 243 DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq)) 244 245 #define DMA_COMM_Q_HIGH_FMAN_V3 0x3f 246 #define DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq) \ 247 ((dma_thresh_max_commq + 1) * 3 / 4) 248 #define DFLT_DMA_COMM_Q_HIGH(major, dma_thresh_max_commq) \ 249 ((major == 6) ? DMA_COMM_Q_HIGH_FMAN_V3 : \ 250 DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq)) 251 252 #define TOTAL_NUM_OF_TASKS_FMAN_V3L 59 253 #define TOTAL_NUM_OF_TASKS_FMAN_V3H 124 254 #define DFLT_TOTAL_NUM_OF_TASKS(major, minor, bmi_max_num_of_tasks) \ 255 ((major == 6) ? ((minor == 1 || minor == 4) ? \ 256 TOTAL_NUM_OF_TASKS_FMAN_V3L : TOTAL_NUM_OF_TASKS_FMAN_V3H) : \ 257 bmi_max_num_of_tasks) 258 259 #define DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 64 260 #define DMA_CAM_NUM_OF_ENTRIES_FMAN_V2 32 261 #define DFLT_DMA_CAM_NUM_OF_ENTRIES(major) \ 262 (major == 6 ? DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 : \ 263 DMA_CAM_NUM_OF_ENTRIES_FMAN_V2) 264 265 #define FM_TIMESTAMP_1_USEC_BIT 8 266 267 /* Defines used for enabling/disabling FMan interrupts */ 268 #define ERR_INTR_EN_DMA 0x00010000 269 #define ERR_INTR_EN_FPM 0x80000000 270 #define ERR_INTR_EN_BMI 0x00800000 271 #define ERR_INTR_EN_QMI 0x00400000 272 #define ERR_INTR_EN_MURAM 0x00040000 273 #define ERR_INTR_EN_MAC0 0x00004000 274 #define ERR_INTR_EN_MAC1 0x00002000 275 #define ERR_INTR_EN_MAC2 0x00001000 276 #define ERR_INTR_EN_MAC3 0x00000800 277 #define ERR_INTR_EN_MAC4 0x00000400 278 #define ERR_INTR_EN_MAC5 0x00000200 279 #define ERR_INTR_EN_MAC6 0x00000100 280 #define ERR_INTR_EN_MAC7 0x00000080 281 #define ERR_INTR_EN_MAC8 0x00008000 282 #define ERR_INTR_EN_MAC9 0x00000040 283 284 #define INTR_EN_QMI 0x40000000 285 #define INTR_EN_MAC0 0x00080000 286 #define INTR_EN_MAC1 0x00040000 287 #define INTR_EN_MAC2 0x00020000 288 #define INTR_EN_MAC3 0x00010000 289 #define INTR_EN_MAC4 0x00000040 290 #define INTR_EN_MAC5 0x00000020 291 #define INTR_EN_MAC6 0x00000008 292 #define INTR_EN_MAC7 0x00000002 293 #define INTR_EN_MAC8 0x00200000 294 #define INTR_EN_MAC9 0x00100000 295 #define INTR_EN_REV0 0x00008000 296 #define INTR_EN_REV1 0x00004000 297 #define INTR_EN_REV2 0x00002000 298 #define INTR_EN_REV3 0x00001000 299 #define INTR_EN_TMR 0x01000000 300 301 enum fman_dma_aid_mode { 302 FMAN_DMA_AID_OUT_PORT_ID = 0, /* 4 LSB of PORT_ID */ 303 FMAN_DMA_AID_OUT_TNUM /* 4 LSB of TNUM */ 304 }; 305 306 struct fman_iram_regs { 307 u32 iadd; /* FM IRAM instruction address register */ 308 u32 idata; /* FM IRAM instruction data register */ 309 u32 itcfg; /* FM IRAM timing config register */ 310 u32 iready; /* FM IRAM ready register */ 311 }; 312 313 struct fman_fpm_regs { 314 u32 fmfp_tnc; /* FPM TNUM Control 0x00 */ 315 u32 fmfp_prc; /* FPM Port_ID FmCtl Association 0x04 */ 316 u32 fmfp_brkc; /* FPM Breakpoint Control 0x08 */ 317 u32 fmfp_mxd; /* FPM Flush Control 0x0c */ 318 u32 fmfp_dist1; /* FPM Dispatch Thresholds1 0x10 */ 319 u32 fmfp_dist2; /* FPM Dispatch Thresholds2 0x14 */ 320 u32 fm_epi; /* FM Error Pending Interrupts 0x18 */ 321 u32 fm_rie; /* FM Error Interrupt Enable 0x1c */ 322 u32 fmfp_fcev[4]; /* FPM FMan-Controller Event 1-4 0x20-0x2f */ 323 u32 res0030[4]; /* res 0x30 - 0x3f */ 324 u32 fmfp_cee[4]; /* PM FMan-Controller Event 1-4 0x40-0x4f */ 325 u32 res0050[4]; /* res 0x50-0x5f */ 326 u32 fmfp_tsc1; /* FPM TimeStamp Control1 0x60 */ 327 u32 fmfp_tsc2; /* FPM TimeStamp Control2 0x64 */ 328 u32 fmfp_tsp; /* FPM Time Stamp 0x68 */ 329 u32 fmfp_tsf; /* FPM Time Stamp Fraction 0x6c */ 330 u32 fm_rcr; /* FM Rams Control 0x70 */ 331 u32 fmfp_extc; /* FPM External Requests Control 0x74 */ 332 u32 fmfp_ext1; /* FPM External Requests Config1 0x78 */ 333 u32 fmfp_ext2; /* FPM External Requests Config2 0x7c */ 334 u32 fmfp_drd[16]; /* FPM Data_Ram Data 0-15 0x80 - 0xbf */ 335 u32 fmfp_dra; /* FPM Data Ram Access 0xc0 */ 336 u32 fm_ip_rev_1; /* FM IP Block Revision 1 0xc4 */ 337 u32 fm_ip_rev_2; /* FM IP Block Revision 2 0xc8 */ 338 u32 fm_rstc; /* FM Reset Command 0xcc */ 339 u32 fm_cld; /* FM Classifier Debug 0xd0 */ 340 u32 fm_npi; /* FM Normal Pending Interrupts 0xd4 */ 341 u32 fmfp_exte; /* FPM External Requests Enable 0xd8 */ 342 u32 fmfp_ee; /* FPM Event&Mask 0xdc */ 343 u32 fmfp_cev[4]; /* FPM CPU Event 1-4 0xe0-0xef */ 344 u32 res00f0[4]; /* res 0xf0-0xff */ 345 u32 fmfp_ps[50]; /* FPM Port Status 0x100-0x1c7 */ 346 u32 res01c8[14]; /* res 0x1c8-0x1ff */ 347 u32 fmfp_clfabc; /* FPM CLFABC 0x200 */ 348 u32 fmfp_clfcc; /* FPM CLFCC 0x204 */ 349 u32 fmfp_clfaval; /* FPM CLFAVAL 0x208 */ 350 u32 fmfp_clfbval; /* FPM CLFBVAL 0x20c */ 351 u32 fmfp_clfcval; /* FPM CLFCVAL 0x210 */ 352 u32 fmfp_clfamsk; /* FPM CLFAMSK 0x214 */ 353 u32 fmfp_clfbmsk; /* FPM CLFBMSK 0x218 */ 354 u32 fmfp_clfcmsk; /* FPM CLFCMSK 0x21c */ 355 u32 fmfp_clfamc; /* FPM CLFAMC 0x220 */ 356 u32 fmfp_clfbmc; /* FPM CLFBMC 0x224 */ 357 u32 fmfp_clfcmc; /* FPM CLFCMC 0x228 */ 358 u32 fmfp_decceh; /* FPM DECCEH 0x22c */ 359 u32 res0230[116]; /* res 0x230 - 0x3ff */ 360 u32 fmfp_ts[128]; /* 0x400: FPM Task Status 0x400 - 0x5ff */ 361 u32 res0600[0x400 - 384]; 362 }; 363 364 struct fman_bmi_regs { 365 u32 fmbm_init; /* BMI Initialization 0x00 */ 366 u32 fmbm_cfg1; /* BMI Configuration 1 0x04 */ 367 u32 fmbm_cfg2; /* BMI Configuration 2 0x08 */ 368 u32 res000c[5]; /* 0x0c - 0x1f */ 369 u32 fmbm_ievr; /* Interrupt Event Register 0x20 */ 370 u32 fmbm_ier; /* Interrupt Enable Register 0x24 */ 371 u32 fmbm_ifr; /* Interrupt Force Register 0x28 */ 372 u32 res002c[5]; /* 0x2c - 0x3f */ 373 u32 fmbm_arb[8]; /* BMI Arbitration 0x40 - 0x5f */ 374 u32 res0060[12]; /* 0x60 - 0x8f */ 375 u32 fmbm_dtc[3]; /* Debug Trap Counter 0x90 - 0x9b */ 376 u32 res009c; /* 0x9c */ 377 u32 fmbm_dcv[3][4]; /* Debug Compare val 0xa0-0xcf */ 378 u32 fmbm_dcm[3][4]; /* Debug Compare Mask 0xd0-0xff */ 379 u32 fmbm_gde; /* BMI Global Debug Enable 0x100 */ 380 u32 fmbm_pp[63]; /* BMI Port Parameters 0x104 - 0x1ff */ 381 u32 res0200; /* 0x200 */ 382 u32 fmbm_pfs[63]; /* BMI Port FIFO Size 0x204 - 0x2ff */ 383 u32 res0300; /* 0x300 */ 384 u32 fmbm_spliodn[63]; /* Port Partition ID 0x304 - 0x3ff */ 385 }; 386 387 struct fman_qmi_regs { 388 u32 fmqm_gc; /* General Configuration Register 0x00 */ 389 u32 res0004; /* 0x04 */ 390 u32 fmqm_eie; /* Error Interrupt Event Register 0x08 */ 391 u32 fmqm_eien; /* Error Interrupt Enable Register 0x0c */ 392 u32 fmqm_eif; /* Error Interrupt Force Register 0x10 */ 393 u32 fmqm_ie; /* Interrupt Event Register 0x14 */ 394 u32 fmqm_ien; /* Interrupt Enable Register 0x18 */ 395 u32 fmqm_if; /* Interrupt Force Register 0x1c */ 396 u32 fmqm_gs; /* Global Status Register 0x20 */ 397 u32 fmqm_ts; /* Task Status Register 0x24 */ 398 u32 fmqm_etfc; /* Enqueue Total Frame Counter 0x28 */ 399 u32 fmqm_dtfc; /* Dequeue Total Frame Counter 0x2c */ 400 u32 fmqm_dc0; /* Dequeue Counter 0 0x30 */ 401 u32 fmqm_dc1; /* Dequeue Counter 1 0x34 */ 402 u32 fmqm_dc2; /* Dequeue Counter 2 0x38 */ 403 u32 fmqm_dc3; /* Dequeue Counter 3 0x3c */ 404 u32 fmqm_dfdc; /* Dequeue FQID from Default Counter 0x40 */ 405 u32 fmqm_dfcc; /* Dequeue FQID from Context Counter 0x44 */ 406 u32 fmqm_dffc; /* Dequeue FQID from FD Counter 0x48 */ 407 u32 fmqm_dcc; /* Dequeue Confirm Counter 0x4c */ 408 u32 res0050[7]; /* 0x50 - 0x6b */ 409 u32 fmqm_tapc; /* Tnum Aging Period Control 0x6c */ 410 u32 fmqm_dmcvc; /* Dequeue MAC Command Valid Counter 0x70 */ 411 u32 fmqm_difdcc; /* Dequeue Invalid FD Command Counter 0x74 */ 412 u32 fmqm_da1v; /* Dequeue A1 Valid Counter 0x78 */ 413 u32 res007c; /* 0x7c */ 414 u32 fmqm_dtc; /* 0x80 Debug Trap Counter 0x80 */ 415 u32 fmqm_efddd; /* 0x84 Enqueue Frame desc Dynamic dbg 0x84 */ 416 u32 res0088[2]; /* 0x88 - 0x8f */ 417 struct { 418 u32 fmqm_dtcfg1; /* 0x90 dbg trap cfg 1 Register 0x00 */ 419 u32 fmqm_dtval1; /* Debug Trap Value 1 Register 0x04 */ 420 u32 fmqm_dtm1; /* Debug Trap Mask 1 Register 0x08 */ 421 u32 fmqm_dtc1; /* Debug Trap Counter 1 Register 0x0c */ 422 u32 fmqm_dtcfg2; /* dbg Trap cfg 2 Register 0x10 */ 423 u32 fmqm_dtval2; /* Debug Trap Value 2 Register 0x14 */ 424 u32 fmqm_dtm2; /* Debug Trap Mask 2 Register 0x18 */ 425 u32 res001c; /* 0x1c */ 426 } dbg_traps[3]; /* 0x90 - 0xef */ 427 u8 res00f0[0x400 - 0xf0]; /* 0xf0 - 0x3ff */ 428 }; 429 430 struct fman_dma_regs { 431 u32 fmdmsr; /* FM DMA status register 0x00 */ 432 u32 fmdmmr; /* FM DMA mode register 0x04 */ 433 u32 fmdmtr; /* FM DMA bus threshold register 0x08 */ 434 u32 fmdmhy; /* FM DMA bus hysteresis register 0x0c */ 435 u32 fmdmsetr; /* FM DMA SOS emergency Threshold Register 0x10 */ 436 u32 fmdmtah; /* FM DMA transfer bus address high reg 0x14 */ 437 u32 fmdmtal; /* FM DMA transfer bus address low reg 0x18 */ 438 u32 fmdmtcid; /* FM DMA transfer bus communication ID reg 0x1c */ 439 u32 fmdmra; /* FM DMA bus internal ram address register 0x20 */ 440 u32 fmdmrd; /* FM DMA bus internal ram data register 0x24 */ 441 u32 fmdmwcr; /* FM DMA CAM watchdog counter value 0x28 */ 442 u32 fmdmebcr; /* FM DMA CAM base in MURAM register 0x2c */ 443 u32 fmdmccqdr; /* FM DMA CAM and CMD Queue Debug reg 0x30 */ 444 u32 fmdmccqvr1; /* FM DMA CAM and CMD Queue Value reg #1 0x34 */ 445 u32 fmdmccqvr2; /* FM DMA CAM and CMD Queue Value reg #2 0x38 */ 446 u32 fmdmcqvr3; /* FM DMA CMD Queue Value register #3 0x3c */ 447 u32 fmdmcqvr4; /* FM DMA CMD Queue Value register #4 0x40 */ 448 u32 fmdmcqvr5; /* FM DMA CMD Queue Value register #5 0x44 */ 449 u32 fmdmsefrc; /* FM DMA Semaphore Entry Full Reject Cntr 0x48 */ 450 u32 fmdmsqfrc; /* FM DMA Semaphore Queue Full Reject Cntr 0x4c */ 451 u32 fmdmssrc; /* FM DMA Semaphore SYNC Reject Counter 0x50 */ 452 u32 fmdmdcr; /* FM DMA Debug Counter 0x54 */ 453 u32 fmdmemsr; /* FM DMA Emergency Smoother Register 0x58 */ 454 u32 res005c; /* 0x5c */ 455 u32 fmdmplr[FMAN_LIODN_TBL / 2]; /* DMA LIODN regs 0x60-0xdf */ 456 u32 res00e0[0x400 - 56]; 457 }; 458 459 struct fman_hwp_regs { 460 u32 res0000[0x844 / 4]; /* 0x000..0x843 */ 461 u32 fmprrpimac; /* FM Parser Internal memory access control */ 462 u32 res[(0x1000 - 0x848) / 4]; /* 0x848..0xFFF */ 463 }; 464 465 /* Structure that holds current FMan state. 466 * Used for saving run time information. 467 */ 468 struct fman_state_struct { 469 u8 fm_id; 470 u16 fm_clk_freq; 471 struct fman_rev_info rev_info; 472 bool enabled_time_stamp; 473 u8 count1_micro_bit; 474 u8 total_num_of_tasks; 475 u8 accumulated_num_of_tasks; 476 u32 accumulated_fifo_size; 477 u8 accumulated_num_of_open_dmas; 478 u8 accumulated_num_of_deq_tnums; 479 u32 exceptions; 480 u32 extra_fifo_pool_size; 481 u8 extra_tasks_pool_size; 482 u8 extra_open_dmas_pool_size; 483 u16 port_mfl[MAX_NUM_OF_MACS]; 484 u16 mac_mfl[MAX_NUM_OF_MACS]; 485 486 /* SOC specific */ 487 u32 fm_iram_size; 488 /* DMA */ 489 u32 dma_thresh_max_commq; 490 u32 dma_thresh_max_buf; 491 u32 max_num_of_open_dmas; 492 /* QMI */ 493 u32 qmi_max_num_of_tnums; 494 u32 qmi_def_tnums_thresh; 495 /* BMI */ 496 u32 bmi_max_num_of_tasks; 497 u32 bmi_max_fifo_size; 498 /* General */ 499 u32 fm_port_num_of_cg; 500 u32 num_of_rx_ports; 501 u32 total_fifo_size; 502 503 u32 qman_channel_base; 504 u32 num_of_qman_channels; 505 506 struct resource *res; 507 }; 508 509 /* Structure that holds FMan initial configuration */ 510 struct fman_cfg { 511 u8 disp_limit_tsh; 512 u8 prs_disp_tsh; 513 u8 plcr_disp_tsh; 514 u8 kg_disp_tsh; 515 u8 bmi_disp_tsh; 516 u8 qmi_enq_disp_tsh; 517 u8 qmi_deq_disp_tsh; 518 u8 fm_ctl1_disp_tsh; 519 u8 fm_ctl2_disp_tsh; 520 int dma_cache_override; 521 enum fman_dma_aid_mode dma_aid_mode; 522 u32 dma_axi_dbg_num_of_beats; 523 u32 dma_cam_num_of_entries; 524 u32 dma_watchdog; 525 u8 dma_comm_qtsh_asrt_emer; 526 u32 dma_write_buf_tsh_asrt_emer; 527 u32 dma_read_buf_tsh_asrt_emer; 528 u8 dma_comm_qtsh_clr_emer; 529 u32 dma_write_buf_tsh_clr_emer; 530 u32 dma_read_buf_tsh_clr_emer; 531 u32 dma_sos_emergency; 532 int dma_dbg_cnt_mode; 533 int catastrophic_err; 534 int dma_err; 535 u32 exceptions; 536 u16 clk_freq; 537 u32 cam_base_addr; 538 u32 fifo_base_addr; 539 u32 total_fifo_size; 540 u32 total_num_of_tasks; 541 u32 qmi_def_tnums_thresh; 542 }; 543 544 #ifdef CONFIG_DPAA_ERRATUM_A050385 545 static bool fman_has_err_a050385; 546 #endif 547 548 static irqreturn_t fman_exceptions(struct fman *fman, 549 enum fman_exceptions exception) 550 { 551 dev_dbg(fman->dev, "%s: FMan[%d] exception %d\n", 552 __func__, fman->state->fm_id, exception); 553 554 return IRQ_HANDLED; 555 } 556 557 static irqreturn_t fman_bus_error(struct fman *fman, u8 __maybe_unused port_id, 558 u64 __maybe_unused addr, 559 u8 __maybe_unused tnum, 560 u16 __maybe_unused liodn) 561 { 562 dev_dbg(fman->dev, "%s: FMan[%d] bus error: port_id[%d]\n", 563 __func__, fman->state->fm_id, port_id); 564 565 return IRQ_HANDLED; 566 } 567 568 static inline irqreturn_t call_mac_isr(struct fman *fman, u8 id) 569 { 570 if (fman->intr_mng[id].isr_cb) { 571 fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle); 572 573 return IRQ_HANDLED; 574 } 575 576 return IRQ_NONE; 577 } 578 579 static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id) 580 { 581 u8 sw_port_id = 0; 582 583 if (hw_port_id >= BASE_TX_PORTID) 584 sw_port_id = hw_port_id - BASE_TX_PORTID; 585 else if (hw_port_id >= BASE_RX_PORTID) 586 sw_port_id = hw_port_id - BASE_RX_PORTID; 587 else 588 sw_port_id = 0; 589 590 return sw_port_id; 591 } 592 593 static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg, 594 u8 port_id) 595 { 596 u32 tmp = 0; 597 598 tmp = port_id << FPM_PORT_FM_CTL_PORTID_SHIFT; 599 600 tmp |= FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1; 601 602 /* order restoration */ 603 if (port_id % 2) 604 tmp |= FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT; 605 else 606 tmp |= FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT; 607 608 iowrite32be(tmp, &fpm_rg->fmfp_prc); 609 } 610 611 static void set_port_liodn(struct fman *fman, u8 port_id, 612 u32 liodn_base, u32 liodn_ofst) 613 { 614 u32 tmp; 615 616 iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]); 617 if (!IS_ENABLED(CONFIG_FSL_PAMU)) 618 return; 619 /* set LIODN base for this port */ 620 tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]); 621 if (port_id % 2) { 622 tmp &= ~DMA_LIODN_BASE_MASK; 623 tmp |= liodn_base; 624 } else { 625 tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT); 626 tmp |= liodn_base << DMA_LIODN_SHIFT; 627 } 628 iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]); 629 } 630 631 static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg) 632 { 633 u32 tmp; 634 635 tmp = ioread32be(&fpm_rg->fm_rcr); 636 if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL) 637 iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr); 638 else 639 iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN | 640 FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr); 641 } 642 643 static void disable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg) 644 { 645 u32 tmp; 646 647 tmp = ioread32be(&fpm_rg->fm_rcr); 648 if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL) 649 iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr); 650 else 651 iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN), 652 &fpm_rg->fm_rcr); 653 } 654 655 static void fman_defconfig(struct fman_cfg *cfg) 656 { 657 memset(cfg, 0, sizeof(struct fman_cfg)); 658 659 cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR; 660 cfg->dma_err = DEFAULT_DMA_ERR; 661 cfg->dma_aid_mode = DEFAULT_AID_MODE; 662 cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW; 663 cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH; 664 cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE; 665 cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES; 666 cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE; 667 cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY; 668 cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG; 669 cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT; 670 cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH; 671 cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH; 672 cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH; 673 cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH; 674 cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH; 675 cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH; 676 cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH; 677 cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH; 678 } 679 680 static int dma_init(struct fman *fman) 681 { 682 struct fman_dma_regs __iomem *dma_rg = fman->dma_regs; 683 struct fman_cfg *cfg = fman->cfg; 684 u32 tmp_reg; 685 686 /* Init DMA Registers */ 687 688 /* clear status reg events */ 689 tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC | 690 DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC); 691 iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg, &dma_rg->fmdmsr); 692 693 /* configure mode register */ 694 tmp_reg = 0; 695 tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT; 696 if (cfg->exceptions & EX_DMA_BUS_ERROR) 697 tmp_reg |= DMA_MODE_BER; 698 if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) | 699 (cfg->exceptions & EX_DMA_READ_ECC) | 700 (cfg->exceptions & EX_DMA_FM_WRITE_ECC)) 701 tmp_reg |= DMA_MODE_ECC; 702 if (cfg->dma_axi_dbg_num_of_beats) 703 tmp_reg |= (DMA_MODE_AXI_DBG_MASK & 704 ((cfg->dma_axi_dbg_num_of_beats - 1) 705 << DMA_MODE_AXI_DBG_SHIFT)); 706 707 tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) & 708 DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT; 709 tmp_reg |= DMA_MODE_SECURE_PROT; 710 tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT; 711 tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT; 712 713 iowrite32be(tmp_reg, &dma_rg->fmdmmr); 714 715 /* configure thresholds register */ 716 tmp_reg = ((u32)cfg->dma_comm_qtsh_asrt_emer << 717 DMA_THRESH_COMMQ_SHIFT); 718 tmp_reg |= (cfg->dma_read_buf_tsh_asrt_emer & 719 DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT; 720 tmp_reg |= cfg->dma_write_buf_tsh_asrt_emer & 721 DMA_THRESH_WRITE_INT_BUF_MASK; 722 723 iowrite32be(tmp_reg, &dma_rg->fmdmtr); 724 725 /* configure hysteresis register */ 726 tmp_reg = ((u32)cfg->dma_comm_qtsh_clr_emer << 727 DMA_THRESH_COMMQ_SHIFT); 728 tmp_reg |= (cfg->dma_read_buf_tsh_clr_emer & 729 DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT; 730 tmp_reg |= cfg->dma_write_buf_tsh_clr_emer & 731 DMA_THRESH_WRITE_INT_BUF_MASK; 732 733 iowrite32be(tmp_reg, &dma_rg->fmdmhy); 734 735 /* configure emergency threshold */ 736 iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr); 737 738 /* configure Watchdog */ 739 iowrite32be((cfg->dma_watchdog * cfg->clk_freq), &dma_rg->fmdmwcr); 740 741 iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr); 742 743 /* Allocate MURAM for CAM */ 744 fman->cam_size = 745 (u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY); 746 fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size); 747 if (IS_ERR_VALUE(fman->cam_offset)) { 748 dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n", 749 __func__); 750 return -ENOMEM; 751 } 752 753 if (fman->state->rev_info.major == 2) { 754 u32 __iomem *cam_base_addr; 755 756 fman_muram_free_mem(fman->muram, fman->cam_offset, 757 fman->cam_size); 758 759 fman->cam_size = fman->cfg->dma_cam_num_of_entries * 72 + 128; 760 fman->cam_offset = fman_muram_alloc(fman->muram, 761 fman->cam_size); 762 if (IS_ERR_VALUE(fman->cam_offset)) { 763 dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n", 764 __func__); 765 return -ENOMEM; 766 } 767 768 if (fman->cfg->dma_cam_num_of_entries % 8 || 769 fman->cfg->dma_cam_num_of_entries > 32) { 770 dev_err(fman->dev, "%s: wrong dma_cam_num_of_entries\n", 771 __func__); 772 return -EINVAL; 773 } 774 775 cam_base_addr = (u32 __iomem *) 776 fman_muram_offset_to_vbase(fman->muram, 777 fman->cam_offset); 778 iowrite32be(~((1 << 779 (32 - fman->cfg->dma_cam_num_of_entries)) - 1), 780 cam_base_addr); 781 } 782 783 fman->cfg->cam_base_addr = fman->cam_offset; 784 785 return 0; 786 } 787 788 static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg) 789 { 790 u32 tmp_reg; 791 int i; 792 793 /* Init FPM Registers */ 794 795 tmp_reg = (u32)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT); 796 iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd); 797 798 tmp_reg = (((u32)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) | 799 ((u32)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) | 800 ((u32)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) | 801 ((u32)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT)); 802 iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1); 803 804 tmp_reg = 805 (((u32)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) | 806 ((u32)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) | 807 ((u32)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) | 808 ((u32)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT)); 809 iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2); 810 811 /* define exceptions and error behavior */ 812 tmp_reg = 0; 813 /* Clear events */ 814 tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC | 815 FPM_EV_MASK_SINGLE_ECC); 816 /* enable interrupts */ 817 if (cfg->exceptions & EX_FPM_STALL_ON_TASKS) 818 tmp_reg |= FPM_EV_MASK_STALL_EN; 819 if (cfg->exceptions & EX_FPM_SINGLE_ECC) 820 tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN; 821 if (cfg->exceptions & EX_FPM_DOUBLE_ECC) 822 tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN; 823 tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT); 824 tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT); 825 /* FMan is not halted upon external halt activation */ 826 tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT; 827 /* Man is not halted upon Unrecoverable ECC error behavior */ 828 tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT; 829 iowrite32be(tmp_reg, &fpm_rg->fmfp_ee); 830 831 /* clear all fmCtls event registers */ 832 for (i = 0; i < FM_NUM_OF_FMAN_CTRL_EVENT_REGS; i++) 833 iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]); 834 835 /* RAM ECC - enable and clear events */ 836 /* first we need to clear all parser memory, 837 * as it is uninitialized and may cause ECC errors 838 */ 839 /* event bits */ 840 tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC); 841 842 iowrite32be(tmp_reg, &fpm_rg->fm_rcr); 843 844 tmp_reg = 0; 845 if (cfg->exceptions & EX_IRAM_ECC) { 846 tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN; 847 enable_rams_ecc(fpm_rg); 848 } 849 if (cfg->exceptions & EX_MURAM_ECC) { 850 tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN; 851 enable_rams_ecc(fpm_rg); 852 } 853 iowrite32be(tmp_reg, &fpm_rg->fm_rie); 854 } 855 856 static void bmi_init(struct fman_bmi_regs __iomem *bmi_rg, 857 struct fman_cfg *cfg) 858 { 859 u32 tmp_reg; 860 861 /* Init BMI Registers */ 862 863 /* define common resources */ 864 tmp_reg = cfg->fifo_base_addr; 865 tmp_reg = tmp_reg / BMI_FIFO_ALIGN; 866 867 tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) << 868 BMI_CFG1_FIFO_SIZE_SHIFT); 869 iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1); 870 871 tmp_reg = ((cfg->total_num_of_tasks - 1) & BMI_CFG2_TASKS_MASK) << 872 BMI_CFG2_TASKS_SHIFT; 873 /* num of DMA's will be dynamically updated when each port is set */ 874 iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2); 875 876 /* define unmaskable exceptions, enable and clear events */ 877 tmp_reg = 0; 878 iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC | 879 BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC | 880 BMI_ERR_INTR_EN_STATISTICS_RAM_ECC | 881 BMI_ERR_INTR_EN_DISPATCH_RAM_ECC, &bmi_rg->fmbm_ievr); 882 883 if (cfg->exceptions & EX_BMI_LIST_RAM_ECC) 884 tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC; 885 if (cfg->exceptions & EX_BMI_STORAGE_PROFILE_ECC) 886 tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC; 887 if (cfg->exceptions & EX_BMI_STATISTICS_RAM_ECC) 888 tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC; 889 if (cfg->exceptions & EX_BMI_DISPATCH_RAM_ECC) 890 tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC; 891 iowrite32be(tmp_reg, &bmi_rg->fmbm_ier); 892 } 893 894 static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg, 895 struct fman_cfg *cfg) 896 { 897 u32 tmp_reg; 898 899 /* Init QMI Registers */ 900 901 /* Clear error interrupt events */ 902 903 iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF, 904 &qmi_rg->fmqm_eie); 905 tmp_reg = 0; 906 if (cfg->exceptions & EX_QMI_DEQ_FROM_UNKNOWN_PORTID) 907 tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF; 908 if (cfg->exceptions & EX_QMI_DOUBLE_ECC) 909 tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC; 910 /* enable events */ 911 iowrite32be(tmp_reg, &qmi_rg->fmqm_eien); 912 913 tmp_reg = 0; 914 /* Clear interrupt events */ 915 iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie); 916 if (cfg->exceptions & EX_QMI_SINGLE_ECC) 917 tmp_reg |= QMI_INTR_EN_SINGLE_ECC; 918 /* enable events */ 919 iowrite32be(tmp_reg, &qmi_rg->fmqm_ien); 920 } 921 922 static void hwp_init(struct fman_hwp_regs __iomem *hwp_rg) 923 { 924 /* enable HW Parser */ 925 iowrite32be(HWP_RPIMAC_PEN, &hwp_rg->fmprrpimac); 926 } 927 928 static int enable(struct fman *fman, struct fman_cfg *cfg) 929 { 930 u32 cfg_reg = 0; 931 932 /* Enable all modules */ 933 934 /* clear&enable global counters - calculate reg and save for later, 935 * because it's the same reg for QMI enable 936 */ 937 cfg_reg = QMI_CFG_EN_COUNTERS; 938 939 /* Set enqueue and dequeue thresholds */ 940 cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) | cfg->qmi_def_tnums_thresh; 941 942 iowrite32be(BMI_INIT_START, &fman->bmi_regs->fmbm_init); 943 iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN, 944 &fman->qmi_regs->fmqm_gc); 945 946 return 0; 947 } 948 949 static int set_exception(struct fman *fman, 950 enum fman_exceptions exception, bool enable) 951 { 952 u32 tmp; 953 954 switch (exception) { 955 case FMAN_EX_DMA_BUS_ERROR: 956 tmp = ioread32be(&fman->dma_regs->fmdmmr); 957 if (enable) 958 tmp |= DMA_MODE_BER; 959 else 960 tmp &= ~DMA_MODE_BER; 961 /* disable bus error */ 962 iowrite32be(tmp, &fman->dma_regs->fmdmmr); 963 break; 964 case FMAN_EX_DMA_READ_ECC: 965 case FMAN_EX_DMA_SYSTEM_WRITE_ECC: 966 case FMAN_EX_DMA_FM_WRITE_ECC: 967 tmp = ioread32be(&fman->dma_regs->fmdmmr); 968 if (enable) 969 tmp |= DMA_MODE_ECC; 970 else 971 tmp &= ~DMA_MODE_ECC; 972 iowrite32be(tmp, &fman->dma_regs->fmdmmr); 973 break; 974 case FMAN_EX_FPM_STALL_ON_TASKS: 975 tmp = ioread32be(&fman->fpm_regs->fmfp_ee); 976 if (enable) 977 tmp |= FPM_EV_MASK_STALL_EN; 978 else 979 tmp &= ~FPM_EV_MASK_STALL_EN; 980 iowrite32be(tmp, &fman->fpm_regs->fmfp_ee); 981 break; 982 case FMAN_EX_FPM_SINGLE_ECC: 983 tmp = ioread32be(&fman->fpm_regs->fmfp_ee); 984 if (enable) 985 tmp |= FPM_EV_MASK_SINGLE_ECC_EN; 986 else 987 tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN; 988 iowrite32be(tmp, &fman->fpm_regs->fmfp_ee); 989 break; 990 case FMAN_EX_FPM_DOUBLE_ECC: 991 tmp = ioread32be(&fman->fpm_regs->fmfp_ee); 992 if (enable) 993 tmp |= FPM_EV_MASK_DOUBLE_ECC_EN; 994 else 995 tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN; 996 iowrite32be(tmp, &fman->fpm_regs->fmfp_ee); 997 break; 998 case FMAN_EX_QMI_SINGLE_ECC: 999 tmp = ioread32be(&fman->qmi_regs->fmqm_ien); 1000 if (enable) 1001 tmp |= QMI_INTR_EN_SINGLE_ECC; 1002 else 1003 tmp &= ~QMI_INTR_EN_SINGLE_ECC; 1004 iowrite32be(tmp, &fman->qmi_regs->fmqm_ien); 1005 break; 1006 case FMAN_EX_QMI_DOUBLE_ECC: 1007 tmp = ioread32be(&fman->qmi_regs->fmqm_eien); 1008 if (enable) 1009 tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC; 1010 else 1011 tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC; 1012 iowrite32be(tmp, &fman->qmi_regs->fmqm_eien); 1013 break; 1014 case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID: 1015 tmp = ioread32be(&fman->qmi_regs->fmqm_eien); 1016 if (enable) 1017 tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF; 1018 else 1019 tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF; 1020 iowrite32be(tmp, &fman->qmi_regs->fmqm_eien); 1021 break; 1022 case FMAN_EX_BMI_LIST_RAM_ECC: 1023 tmp = ioread32be(&fman->bmi_regs->fmbm_ier); 1024 if (enable) 1025 tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC; 1026 else 1027 tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC; 1028 iowrite32be(tmp, &fman->bmi_regs->fmbm_ier); 1029 break; 1030 case FMAN_EX_BMI_STORAGE_PROFILE_ECC: 1031 tmp = ioread32be(&fman->bmi_regs->fmbm_ier); 1032 if (enable) 1033 tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC; 1034 else 1035 tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC; 1036 iowrite32be(tmp, &fman->bmi_regs->fmbm_ier); 1037 break; 1038 case FMAN_EX_BMI_STATISTICS_RAM_ECC: 1039 tmp = ioread32be(&fman->bmi_regs->fmbm_ier); 1040 if (enable) 1041 tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC; 1042 else 1043 tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC; 1044 iowrite32be(tmp, &fman->bmi_regs->fmbm_ier); 1045 break; 1046 case FMAN_EX_BMI_DISPATCH_RAM_ECC: 1047 tmp = ioread32be(&fman->bmi_regs->fmbm_ier); 1048 if (enable) 1049 tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC; 1050 else 1051 tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC; 1052 iowrite32be(tmp, &fman->bmi_regs->fmbm_ier); 1053 break; 1054 case FMAN_EX_IRAM_ECC: 1055 tmp = ioread32be(&fman->fpm_regs->fm_rie); 1056 if (enable) { 1057 /* enable ECC if not enabled */ 1058 enable_rams_ecc(fman->fpm_regs); 1059 /* enable ECC interrupts */ 1060 tmp |= FPM_IRAM_ECC_ERR_EX_EN; 1061 } else { 1062 /* ECC mechanism may be disabled, 1063 * depending on driver status 1064 */ 1065 disable_rams_ecc(fman->fpm_regs); 1066 tmp &= ~FPM_IRAM_ECC_ERR_EX_EN; 1067 } 1068 iowrite32be(tmp, &fman->fpm_regs->fm_rie); 1069 break; 1070 case FMAN_EX_MURAM_ECC: 1071 tmp = ioread32be(&fman->fpm_regs->fm_rie); 1072 if (enable) { 1073 /* enable ECC if not enabled */ 1074 enable_rams_ecc(fman->fpm_regs); 1075 /* enable ECC interrupts */ 1076 tmp |= FPM_MURAM_ECC_ERR_EX_EN; 1077 } else { 1078 /* ECC mechanism may be disabled, 1079 * depending on driver status 1080 */ 1081 disable_rams_ecc(fman->fpm_regs); 1082 tmp &= ~FPM_MURAM_ECC_ERR_EX_EN; 1083 } 1084 iowrite32be(tmp, &fman->fpm_regs->fm_rie); 1085 break; 1086 default: 1087 return -EINVAL; 1088 } 1089 return 0; 1090 } 1091 1092 static void resume(struct fman_fpm_regs __iomem *fpm_rg) 1093 { 1094 u32 tmp; 1095 1096 tmp = ioread32be(&fpm_rg->fmfp_ee); 1097 /* clear tmp_reg event bits in order not to clear standing events */ 1098 tmp &= ~(FPM_EV_MASK_DOUBLE_ECC | 1099 FPM_EV_MASK_STALL | FPM_EV_MASK_SINGLE_ECC); 1100 tmp |= FPM_EV_MASK_RELEASE_FM; 1101 1102 iowrite32be(tmp, &fpm_rg->fmfp_ee); 1103 } 1104 1105 static int fill_soc_specific_params(struct fman_state_struct *state) 1106 { 1107 u8 minor = state->rev_info.minor; 1108 /* P4080 - Major 2 1109 * P2041/P3041/P5020/P5040 - Major 3 1110 * Tx/Bx - Major 6 1111 */ 1112 switch (state->rev_info.major) { 1113 case 3: 1114 state->bmi_max_fifo_size = 160 * 1024; 1115 state->fm_iram_size = 64 * 1024; 1116 state->dma_thresh_max_commq = 31; 1117 state->dma_thresh_max_buf = 127; 1118 state->qmi_max_num_of_tnums = 64; 1119 state->qmi_def_tnums_thresh = 48; 1120 state->bmi_max_num_of_tasks = 128; 1121 state->max_num_of_open_dmas = 32; 1122 state->fm_port_num_of_cg = 256; 1123 state->num_of_rx_ports = 6; 1124 state->total_fifo_size = 136 * 1024; 1125 break; 1126 1127 case 2: 1128 state->bmi_max_fifo_size = 160 * 1024; 1129 state->fm_iram_size = 64 * 1024; 1130 state->dma_thresh_max_commq = 31; 1131 state->dma_thresh_max_buf = 127; 1132 state->qmi_max_num_of_tnums = 64; 1133 state->qmi_def_tnums_thresh = 48; 1134 state->bmi_max_num_of_tasks = 128; 1135 state->max_num_of_open_dmas = 32; 1136 state->fm_port_num_of_cg = 256; 1137 state->num_of_rx_ports = 5; 1138 state->total_fifo_size = 100 * 1024; 1139 break; 1140 1141 case 6: 1142 state->dma_thresh_max_commq = 83; 1143 state->dma_thresh_max_buf = 127; 1144 state->qmi_max_num_of_tnums = 64; 1145 state->qmi_def_tnums_thresh = 32; 1146 state->fm_port_num_of_cg = 256; 1147 1148 /* FManV3L */ 1149 if (minor == 1 || minor == 4) { 1150 state->bmi_max_fifo_size = 192 * 1024; 1151 state->bmi_max_num_of_tasks = 64; 1152 state->max_num_of_open_dmas = 32; 1153 state->num_of_rx_ports = 5; 1154 if (minor == 1) 1155 state->fm_iram_size = 32 * 1024; 1156 else 1157 state->fm_iram_size = 64 * 1024; 1158 state->total_fifo_size = 156 * 1024; 1159 } 1160 /* FManV3H */ 1161 else if (minor == 0 || minor == 2 || minor == 3) { 1162 state->bmi_max_fifo_size = 384 * 1024; 1163 state->fm_iram_size = 64 * 1024; 1164 state->bmi_max_num_of_tasks = 128; 1165 state->max_num_of_open_dmas = 84; 1166 state->num_of_rx_ports = 8; 1167 state->total_fifo_size = 295 * 1024; 1168 } else { 1169 pr_err("Unsupported FManv3 version\n"); 1170 return -EINVAL; 1171 } 1172 1173 break; 1174 default: 1175 pr_err("Unsupported FMan version\n"); 1176 return -EINVAL; 1177 } 1178 1179 return 0; 1180 } 1181 1182 static bool is_init_done(struct fman_cfg *cfg) 1183 { 1184 /* Checks if FMan driver parameters were initialized */ 1185 if (!cfg) 1186 return true; 1187 1188 return false; 1189 } 1190 1191 static void free_init_resources(struct fman *fman) 1192 { 1193 if (fman->cam_offset) 1194 fman_muram_free_mem(fman->muram, fman->cam_offset, 1195 fman->cam_size); 1196 if (fman->fifo_offset) 1197 fman_muram_free_mem(fman->muram, fman->fifo_offset, 1198 fman->fifo_size); 1199 } 1200 1201 static irqreturn_t bmi_err_event(struct fman *fman) 1202 { 1203 u32 event, mask, force; 1204 struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs; 1205 irqreturn_t ret = IRQ_NONE; 1206 1207 event = ioread32be(&bmi_rg->fmbm_ievr); 1208 mask = ioread32be(&bmi_rg->fmbm_ier); 1209 event &= mask; 1210 /* clear the forced events */ 1211 force = ioread32be(&bmi_rg->fmbm_ifr); 1212 if (force & event) 1213 iowrite32be(force & ~event, &bmi_rg->fmbm_ifr); 1214 /* clear the acknowledged events */ 1215 iowrite32be(event, &bmi_rg->fmbm_ievr); 1216 1217 if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC) 1218 ret = fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC); 1219 if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC) 1220 ret = fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC); 1221 if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC) 1222 ret = fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC); 1223 if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC) 1224 ret = fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC); 1225 1226 return ret; 1227 } 1228 1229 static irqreturn_t qmi_err_event(struct fman *fman) 1230 { 1231 u32 event, mask, force; 1232 struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs; 1233 irqreturn_t ret = IRQ_NONE; 1234 1235 event = ioread32be(&qmi_rg->fmqm_eie); 1236 mask = ioread32be(&qmi_rg->fmqm_eien); 1237 event &= mask; 1238 1239 /* clear the forced events */ 1240 force = ioread32be(&qmi_rg->fmqm_eif); 1241 if (force & event) 1242 iowrite32be(force & ~event, &qmi_rg->fmqm_eif); 1243 /* clear the acknowledged events */ 1244 iowrite32be(event, &qmi_rg->fmqm_eie); 1245 1246 if (event & QMI_ERR_INTR_EN_DOUBLE_ECC) 1247 ret = fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC); 1248 if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF) 1249 ret = fman->exception_cb(fman, 1250 FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID); 1251 1252 return ret; 1253 } 1254 1255 static irqreturn_t dma_err_event(struct fman *fman) 1256 { 1257 u32 status, mask, com_id; 1258 u8 tnum, port_id, relative_port_id; 1259 u16 liodn; 1260 struct fman_dma_regs __iomem *dma_rg = fman->dma_regs; 1261 irqreturn_t ret = IRQ_NONE; 1262 1263 status = ioread32be(&dma_rg->fmdmsr); 1264 mask = ioread32be(&dma_rg->fmdmmr); 1265 1266 /* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */ 1267 if ((mask & DMA_MODE_BER) != DMA_MODE_BER) 1268 status &= ~DMA_STATUS_BUS_ERR; 1269 1270 /* clear relevant bits if mask has no DMA_MODE_ECC */ 1271 if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC) 1272 status &= ~(DMA_STATUS_FM_SPDAT_ECC | 1273 DMA_STATUS_READ_ECC | 1274 DMA_STATUS_SYSTEM_WRITE_ECC | 1275 DMA_STATUS_FM_WRITE_ECC); 1276 1277 /* clear set events */ 1278 iowrite32be(status, &dma_rg->fmdmsr); 1279 1280 if (status & DMA_STATUS_BUS_ERR) { 1281 u64 addr; 1282 1283 addr = (u64)ioread32be(&dma_rg->fmdmtal); 1284 addr |= ((u64)(ioread32be(&dma_rg->fmdmtah)) << 32); 1285 1286 com_id = ioread32be(&dma_rg->fmdmtcid); 1287 port_id = (u8)(((com_id & DMA_TRANSFER_PORTID_MASK) >> 1288 DMA_TRANSFER_PORTID_SHIFT)); 1289 relative_port_id = 1290 hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id); 1291 tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >> 1292 DMA_TRANSFER_TNUM_SHIFT); 1293 liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK); 1294 ret = fman->bus_error_cb(fman, relative_port_id, addr, tnum, 1295 liodn); 1296 } 1297 if (status & DMA_STATUS_FM_SPDAT_ECC) 1298 ret = fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC); 1299 if (status & DMA_STATUS_READ_ECC) 1300 ret = fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC); 1301 if (status & DMA_STATUS_SYSTEM_WRITE_ECC) 1302 ret = fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC); 1303 if (status & DMA_STATUS_FM_WRITE_ECC) 1304 ret = fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC); 1305 1306 return ret; 1307 } 1308 1309 static irqreturn_t fpm_err_event(struct fman *fman) 1310 { 1311 u32 event; 1312 struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; 1313 irqreturn_t ret = IRQ_NONE; 1314 1315 event = ioread32be(&fpm_rg->fmfp_ee); 1316 /* clear the all occurred events */ 1317 iowrite32be(event, &fpm_rg->fmfp_ee); 1318 1319 if ((event & FPM_EV_MASK_DOUBLE_ECC) && 1320 (event & FPM_EV_MASK_DOUBLE_ECC_EN)) 1321 ret = fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC); 1322 if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN)) 1323 ret = fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS); 1324 if ((event & FPM_EV_MASK_SINGLE_ECC) && 1325 (event & FPM_EV_MASK_SINGLE_ECC_EN)) 1326 ret = fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC); 1327 1328 return ret; 1329 } 1330 1331 static irqreturn_t muram_err_intr(struct fman *fman) 1332 { 1333 u32 event, mask; 1334 struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; 1335 irqreturn_t ret = IRQ_NONE; 1336 1337 event = ioread32be(&fpm_rg->fm_rcr); 1338 mask = ioread32be(&fpm_rg->fm_rie); 1339 1340 /* clear MURAM event bit (do not clear IRAM event) */ 1341 iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr); 1342 1343 if ((mask & FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC)) 1344 ret = fman->exception_cb(fman, FMAN_EX_MURAM_ECC); 1345 1346 return ret; 1347 } 1348 1349 static irqreturn_t qmi_event(struct fman *fman) 1350 { 1351 u32 event, mask, force; 1352 struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs; 1353 irqreturn_t ret = IRQ_NONE; 1354 1355 event = ioread32be(&qmi_rg->fmqm_ie); 1356 mask = ioread32be(&qmi_rg->fmqm_ien); 1357 event &= mask; 1358 /* clear the forced events */ 1359 force = ioread32be(&qmi_rg->fmqm_if); 1360 if (force & event) 1361 iowrite32be(force & ~event, &qmi_rg->fmqm_if); 1362 /* clear the acknowledged events */ 1363 iowrite32be(event, &qmi_rg->fmqm_ie); 1364 1365 if (event & QMI_INTR_EN_SINGLE_ECC) 1366 ret = fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC); 1367 1368 return ret; 1369 } 1370 1371 static void enable_time_stamp(struct fman *fman) 1372 { 1373 struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; 1374 u16 fm_clk_freq = fman->state->fm_clk_freq; 1375 u32 tmp, intgr, ts_freq, frac; 1376 1377 ts_freq = (u32)(1 << fman->state->count1_micro_bit); 1378 /* configure timestamp so that bit 8 will count 1 microsecond 1379 * Find effective count rate at TIMESTAMP least significant bits: 1380 * Effective_Count_Rate = 1MHz x 2^8 = 256MHz 1381 * Find frequency ratio between effective count rate and the clock: 1382 * Effective_Count_Rate / CLK e.g. for 600 MHz clock: 1383 * 256/600 = 0.4266666... 1384 */ 1385 1386 intgr = ts_freq / fm_clk_freq; 1387 /* we multiply by 2^16 to keep the fraction of the division 1388 * we do not div back, since we write this value as a fraction 1389 * see spec 1390 */ 1391 1392 frac = ((ts_freq << 16) - (intgr << 16) * fm_clk_freq) / fm_clk_freq; 1393 /* we check remainder of the division in order to round up if not int */ 1394 if (((ts_freq << 16) - (intgr << 16) * fm_clk_freq) % fm_clk_freq) 1395 frac++; 1396 1397 tmp = (intgr << FPM_TS_INT_SHIFT) | (u16)frac; 1398 iowrite32be(tmp, &fpm_rg->fmfp_tsc2); 1399 1400 /* enable timestamp with original clock */ 1401 iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1); 1402 fman->state->enabled_time_stamp = true; 1403 } 1404 1405 static int clear_iram(struct fman *fman) 1406 { 1407 struct fman_iram_regs __iomem *iram; 1408 int i, count; 1409 1410 iram = fman->base_addr + IMEM_OFFSET; 1411 1412 /* Enable the auto-increment */ 1413 iowrite32be(IRAM_IADD_AIE, &iram->iadd); 1414 count = 100; 1415 do { 1416 udelay(1); 1417 } while ((ioread32be(&iram->iadd) != IRAM_IADD_AIE) && --count); 1418 if (count == 0) 1419 return -EBUSY; 1420 1421 for (i = 0; i < (fman->state->fm_iram_size / 4); i++) 1422 iowrite32be(0xffffffff, &iram->idata); 1423 1424 iowrite32be(fman->state->fm_iram_size - 4, &iram->iadd); 1425 count = 100; 1426 do { 1427 udelay(1); 1428 } while ((ioread32be(&iram->idata) != 0xffffffff) && --count); 1429 if (count == 0) 1430 return -EBUSY; 1431 1432 return 0; 1433 } 1434 1435 static u32 get_exception_flag(enum fman_exceptions exception) 1436 { 1437 u32 bit_mask; 1438 1439 switch (exception) { 1440 case FMAN_EX_DMA_BUS_ERROR: 1441 bit_mask = EX_DMA_BUS_ERROR; 1442 break; 1443 case FMAN_EX_DMA_SINGLE_PORT_ECC: 1444 bit_mask = EX_DMA_SINGLE_PORT_ECC; 1445 break; 1446 case FMAN_EX_DMA_READ_ECC: 1447 bit_mask = EX_DMA_READ_ECC; 1448 break; 1449 case FMAN_EX_DMA_SYSTEM_WRITE_ECC: 1450 bit_mask = EX_DMA_SYSTEM_WRITE_ECC; 1451 break; 1452 case FMAN_EX_DMA_FM_WRITE_ECC: 1453 bit_mask = EX_DMA_FM_WRITE_ECC; 1454 break; 1455 case FMAN_EX_FPM_STALL_ON_TASKS: 1456 bit_mask = EX_FPM_STALL_ON_TASKS; 1457 break; 1458 case FMAN_EX_FPM_SINGLE_ECC: 1459 bit_mask = EX_FPM_SINGLE_ECC; 1460 break; 1461 case FMAN_EX_FPM_DOUBLE_ECC: 1462 bit_mask = EX_FPM_DOUBLE_ECC; 1463 break; 1464 case FMAN_EX_QMI_SINGLE_ECC: 1465 bit_mask = EX_QMI_SINGLE_ECC; 1466 break; 1467 case FMAN_EX_QMI_DOUBLE_ECC: 1468 bit_mask = EX_QMI_DOUBLE_ECC; 1469 break; 1470 case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID: 1471 bit_mask = EX_QMI_DEQ_FROM_UNKNOWN_PORTID; 1472 break; 1473 case FMAN_EX_BMI_LIST_RAM_ECC: 1474 bit_mask = EX_BMI_LIST_RAM_ECC; 1475 break; 1476 case FMAN_EX_BMI_STORAGE_PROFILE_ECC: 1477 bit_mask = EX_BMI_STORAGE_PROFILE_ECC; 1478 break; 1479 case FMAN_EX_BMI_STATISTICS_RAM_ECC: 1480 bit_mask = EX_BMI_STATISTICS_RAM_ECC; 1481 break; 1482 case FMAN_EX_BMI_DISPATCH_RAM_ECC: 1483 bit_mask = EX_BMI_DISPATCH_RAM_ECC; 1484 break; 1485 case FMAN_EX_MURAM_ECC: 1486 bit_mask = EX_MURAM_ECC; 1487 break; 1488 default: 1489 bit_mask = 0; 1490 break; 1491 } 1492 1493 return bit_mask; 1494 } 1495 1496 static int get_module_event(enum fman_event_modules module, u8 mod_id, 1497 enum fman_intr_type intr_type) 1498 { 1499 int event; 1500 1501 switch (module) { 1502 case FMAN_MOD_MAC: 1503 if (intr_type == FMAN_INTR_TYPE_ERR) 1504 event = FMAN_EV_ERR_MAC0 + mod_id; 1505 else 1506 event = FMAN_EV_MAC0 + mod_id; 1507 break; 1508 case FMAN_MOD_FMAN_CTRL: 1509 if (intr_type == FMAN_INTR_TYPE_ERR) 1510 event = FMAN_EV_CNT; 1511 else 1512 event = (FMAN_EV_FMAN_CTRL_0 + mod_id); 1513 break; 1514 case FMAN_MOD_DUMMY_LAST: 1515 event = FMAN_EV_CNT; 1516 break; 1517 default: 1518 event = FMAN_EV_CNT; 1519 break; 1520 } 1521 1522 return event; 1523 } 1524 1525 static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo, 1526 u32 *extra_size_of_fifo) 1527 { 1528 struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs; 1529 u32 fifo = *size_of_fifo; 1530 u32 extra_fifo = *extra_size_of_fifo; 1531 u32 tmp; 1532 1533 /* if this is the first time a port requires extra_fifo_pool_size, 1534 * the total extra_fifo_pool_size must be initialized to 1 buffer per 1535 * port 1536 */ 1537 if (extra_fifo && !fman->state->extra_fifo_pool_size) 1538 fman->state->extra_fifo_pool_size = 1539 fman->state->num_of_rx_ports * FMAN_BMI_FIFO_UNITS; 1540 1541 fman->state->extra_fifo_pool_size = 1542 max(fman->state->extra_fifo_pool_size, extra_fifo); 1543 1544 /* check that there are enough uncommitted fifo size */ 1545 if ((fman->state->accumulated_fifo_size + fifo) > 1546 (fman->state->total_fifo_size - 1547 fman->state->extra_fifo_pool_size)) { 1548 dev_err(fman->dev, "%s: Requested fifo size and extra size exceed total FIFO size.\n", 1549 __func__); 1550 return -EAGAIN; 1551 } 1552 1553 /* Read, modify and write to HW */ 1554 tmp = (fifo / FMAN_BMI_FIFO_UNITS - 1) | 1555 ((extra_fifo / FMAN_BMI_FIFO_UNITS) << 1556 BMI_EXTRA_FIFO_SIZE_SHIFT); 1557 iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]); 1558 1559 /* update accumulated */ 1560 fman->state->accumulated_fifo_size += fifo; 1561 1562 return 0; 1563 } 1564 1565 static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks, 1566 u8 *num_of_extra_tasks) 1567 { 1568 struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs; 1569 u8 tasks = *num_of_tasks; 1570 u8 extra_tasks = *num_of_extra_tasks; 1571 u32 tmp; 1572 1573 if (extra_tasks) 1574 fman->state->extra_tasks_pool_size = 1575 max(fman->state->extra_tasks_pool_size, extra_tasks); 1576 1577 /* check that there are enough uncommitted tasks */ 1578 if ((fman->state->accumulated_num_of_tasks + tasks) > 1579 (fman->state->total_num_of_tasks - 1580 fman->state->extra_tasks_pool_size)) { 1581 dev_err(fman->dev, "%s: Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n", 1582 __func__, fman->state->fm_id); 1583 return -EAGAIN; 1584 } 1585 /* update accumulated */ 1586 fman->state->accumulated_num_of_tasks += tasks; 1587 1588 /* Write to HW */ 1589 tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) & 1590 ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK); 1591 tmp |= ((u32)((tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) | 1592 (u32)(extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT)); 1593 iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]); 1594 1595 return 0; 1596 } 1597 1598 static int set_num_of_open_dmas(struct fman *fman, u8 port_id, 1599 u8 *num_of_open_dmas, 1600 u8 *num_of_extra_open_dmas) 1601 { 1602 struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs; 1603 u8 open_dmas = *num_of_open_dmas; 1604 u8 extra_open_dmas = *num_of_extra_open_dmas; 1605 u8 total_num_dmas = 0, current_val = 0, current_extra_val = 0; 1606 u32 tmp; 1607 1608 if (!open_dmas) { 1609 /* Configuration according to values in the HW. 1610 * read the current number of open Dma's 1611 */ 1612 tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]); 1613 current_extra_val = (u8)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >> 1614 BMI_EXTRA_NUM_OF_DMAS_SHIFT); 1615 1616 tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]); 1617 current_val = (u8)(((tmp & BMI_NUM_OF_DMAS_MASK) >> 1618 BMI_NUM_OF_DMAS_SHIFT) + 1); 1619 1620 /* This is the first configuration and user did not 1621 * specify value (!open_dmas), reset values will be used 1622 * and we just save these values for resource management 1623 */ 1624 fman->state->extra_open_dmas_pool_size = 1625 (u8)max(fman->state->extra_open_dmas_pool_size, 1626 current_extra_val); 1627 fman->state->accumulated_num_of_open_dmas += current_val; 1628 *num_of_open_dmas = current_val; 1629 *num_of_extra_open_dmas = current_extra_val; 1630 return 0; 1631 } 1632 1633 if (extra_open_dmas > current_extra_val) 1634 fman->state->extra_open_dmas_pool_size = 1635 (u8)max(fman->state->extra_open_dmas_pool_size, 1636 extra_open_dmas); 1637 1638 if ((fman->state->rev_info.major < 6) && 1639 (fman->state->accumulated_num_of_open_dmas - current_val + 1640 open_dmas > fman->state->max_num_of_open_dmas)) { 1641 dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n", 1642 __func__, fman->state->fm_id); 1643 return -EAGAIN; 1644 } else if ((fman->state->rev_info.major >= 6) && 1645 !((fman->state->rev_info.major == 6) && 1646 (fman->state->rev_info.minor == 0)) && 1647 (fman->state->accumulated_num_of_open_dmas - 1648 current_val + open_dmas > 1649 fman->state->dma_thresh_max_commq + 1)) { 1650 dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n", 1651 __func__, fman->state->fm_id, 1652 fman->state->dma_thresh_max_commq + 1); 1653 return -EAGAIN; 1654 } 1655 1656 WARN_ON(fman->state->accumulated_num_of_open_dmas < current_val); 1657 /* update acummulated */ 1658 fman->state->accumulated_num_of_open_dmas -= current_val; 1659 fman->state->accumulated_num_of_open_dmas += open_dmas; 1660 1661 if (fman->state->rev_info.major < 6) 1662 total_num_dmas = 1663 (u8)(fman->state->accumulated_num_of_open_dmas + 1664 fman->state->extra_open_dmas_pool_size); 1665 1666 /* calculate reg */ 1667 tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) & 1668 ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK); 1669 tmp |= (u32)(((open_dmas - 1) << BMI_NUM_OF_DMAS_SHIFT) | 1670 (extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT)); 1671 iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]); 1672 1673 /* update total num of DMA's with committed number of open DMAS, 1674 * and max uncommitted pool. 1675 */ 1676 if (total_num_dmas) { 1677 tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK; 1678 tmp |= (u32)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT; 1679 iowrite32be(tmp, &bmi_rg->fmbm_cfg2); 1680 } 1681 1682 return 0; 1683 } 1684 1685 static int fman_config(struct fman *fman) 1686 { 1687 void __iomem *base_addr; 1688 int err; 1689 1690 base_addr = fman->dts_params.base_addr; 1691 1692 fman->state = kzalloc(sizeof(*fman->state), GFP_KERNEL); 1693 if (!fman->state) 1694 goto err_fm_state; 1695 1696 /* Allocate the FM driver's parameters structure */ 1697 fman->cfg = kzalloc(sizeof(*fman->cfg), GFP_KERNEL); 1698 if (!fman->cfg) 1699 goto err_fm_drv; 1700 1701 /* Initialize MURAM block */ 1702 fman->muram = 1703 fman_muram_init(fman->dts_params.muram_res.start, 1704 resource_size(&fman->dts_params.muram_res)); 1705 if (!fman->muram) 1706 goto err_fm_soc_specific; 1707 1708 /* Initialize FM parameters which will be kept by the driver */ 1709 fman->state->fm_id = fman->dts_params.id; 1710 fman->state->fm_clk_freq = fman->dts_params.clk_freq; 1711 fman->state->qman_channel_base = fman->dts_params.qman_channel_base; 1712 fman->state->num_of_qman_channels = 1713 fman->dts_params.num_of_qman_channels; 1714 fman->state->res = fman->dts_params.res; 1715 fman->exception_cb = fman_exceptions; 1716 fman->bus_error_cb = fman_bus_error; 1717 fman->fpm_regs = base_addr + FPM_OFFSET; 1718 fman->bmi_regs = base_addr + BMI_OFFSET; 1719 fman->qmi_regs = base_addr + QMI_OFFSET; 1720 fman->dma_regs = base_addr + DMA_OFFSET; 1721 fman->hwp_regs = base_addr + HWP_OFFSET; 1722 fman->kg_regs = base_addr + KG_OFFSET; 1723 fman->base_addr = base_addr; 1724 1725 spin_lock_init(&fman->spinlock); 1726 fman_defconfig(fman->cfg); 1727 1728 fman->state->extra_fifo_pool_size = 0; 1729 fman->state->exceptions = (EX_DMA_BUS_ERROR | 1730 EX_DMA_READ_ECC | 1731 EX_DMA_SYSTEM_WRITE_ECC | 1732 EX_DMA_FM_WRITE_ECC | 1733 EX_FPM_STALL_ON_TASKS | 1734 EX_FPM_SINGLE_ECC | 1735 EX_FPM_DOUBLE_ECC | 1736 EX_QMI_DEQ_FROM_UNKNOWN_PORTID | 1737 EX_BMI_LIST_RAM_ECC | 1738 EX_BMI_STORAGE_PROFILE_ECC | 1739 EX_BMI_STATISTICS_RAM_ECC | 1740 EX_MURAM_ECC | 1741 EX_BMI_DISPATCH_RAM_ECC | 1742 EX_QMI_DOUBLE_ECC | 1743 EX_QMI_SINGLE_ECC); 1744 1745 /* Read FMan revision for future use*/ 1746 fman_get_revision(fman, &fman->state->rev_info); 1747 1748 err = fill_soc_specific_params(fman->state); 1749 if (err) 1750 goto err_fm_soc_specific; 1751 1752 /* FM_AID_MODE_NO_TNUM_SW005 Errata workaround */ 1753 if (fman->state->rev_info.major >= 6) 1754 fman->cfg->dma_aid_mode = FMAN_DMA_AID_OUT_PORT_ID; 1755 1756 fman->cfg->qmi_def_tnums_thresh = fman->state->qmi_def_tnums_thresh; 1757 1758 fman->state->total_num_of_tasks = 1759 (u8)DFLT_TOTAL_NUM_OF_TASKS(fman->state->rev_info.major, 1760 fman->state->rev_info.minor, 1761 fman->state->bmi_max_num_of_tasks); 1762 1763 if (fman->state->rev_info.major < 6) { 1764 fman->cfg->dma_comm_qtsh_clr_emer = 1765 (u8)DFLT_DMA_COMM_Q_LOW(fman->state->rev_info.major, 1766 fman->state->dma_thresh_max_commq); 1767 1768 fman->cfg->dma_comm_qtsh_asrt_emer = 1769 (u8)DFLT_DMA_COMM_Q_HIGH(fman->state->rev_info.major, 1770 fman->state->dma_thresh_max_commq); 1771 1772 fman->cfg->dma_cam_num_of_entries = 1773 DFLT_DMA_CAM_NUM_OF_ENTRIES(fman->state->rev_info.major); 1774 1775 fman->cfg->dma_read_buf_tsh_clr_emer = 1776 DFLT_DMA_READ_INT_BUF_LOW(fman->state->dma_thresh_max_buf); 1777 1778 fman->cfg->dma_read_buf_tsh_asrt_emer = 1779 DFLT_DMA_READ_INT_BUF_HIGH(fman->state->dma_thresh_max_buf); 1780 1781 fman->cfg->dma_write_buf_tsh_clr_emer = 1782 DFLT_DMA_WRITE_INT_BUF_LOW(fman->state->dma_thresh_max_buf); 1783 1784 fman->cfg->dma_write_buf_tsh_asrt_emer = 1785 DFLT_DMA_WRITE_INT_BUF_HIGH(fman->state->dma_thresh_max_buf); 1786 1787 fman->cfg->dma_axi_dbg_num_of_beats = 1788 DFLT_AXI_DBG_NUM_OF_BEATS; 1789 } 1790 1791 return 0; 1792 1793 err_fm_soc_specific: 1794 kfree(fman->cfg); 1795 err_fm_drv: 1796 kfree(fman->state); 1797 err_fm_state: 1798 kfree(fman); 1799 return -EINVAL; 1800 } 1801 1802 static int fman_reset(struct fman *fman) 1803 { 1804 u32 count; 1805 int err = 0; 1806 1807 if (fman->state->rev_info.major < 6) { 1808 iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc); 1809 /* Wait for reset completion */ 1810 count = 100; 1811 do { 1812 udelay(1); 1813 } while (((ioread32be(&fman->fpm_regs->fm_rstc)) & 1814 FPM_RSTC_FM_RESET) && --count); 1815 if (count == 0) 1816 err = -EBUSY; 1817 1818 goto _return; 1819 } else { 1820 #ifdef CONFIG_PPC 1821 struct device_node *guts_node; 1822 struct ccsr_guts __iomem *guts_regs; 1823 u32 devdisr2, reg; 1824 1825 /* Errata A007273 */ 1826 guts_node = 1827 of_find_compatible_node(NULL, NULL, 1828 "fsl,qoriq-device-config-2.0"); 1829 if (!guts_node) { 1830 dev_err(fman->dev, "%s: Couldn't find guts node\n", 1831 __func__); 1832 goto guts_node; 1833 } 1834 1835 guts_regs = of_iomap(guts_node, 0); 1836 if (!guts_regs) { 1837 dev_err(fman->dev, "%s: Couldn't map %pOF regs\n", 1838 __func__, guts_node); 1839 goto guts_regs; 1840 } 1841 #define FMAN1_ALL_MACS_MASK 0xFCC00000 1842 #define FMAN2_ALL_MACS_MASK 0x000FCC00 1843 /* Read current state */ 1844 devdisr2 = ioread32be(&guts_regs->devdisr2); 1845 if (fman->dts_params.id == 0) 1846 reg = devdisr2 & ~FMAN1_ALL_MACS_MASK; 1847 else 1848 reg = devdisr2 & ~FMAN2_ALL_MACS_MASK; 1849 1850 /* Enable all MACs */ 1851 iowrite32be(reg, &guts_regs->devdisr2); 1852 #endif 1853 1854 /* Perform FMan reset */ 1855 iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc); 1856 1857 /* Wait for reset completion */ 1858 count = 100; 1859 do { 1860 udelay(1); 1861 } while (((ioread32be(&fman->fpm_regs->fm_rstc)) & 1862 FPM_RSTC_FM_RESET) && --count); 1863 if (count == 0) { 1864 #ifdef CONFIG_PPC 1865 iounmap(guts_regs); 1866 of_node_put(guts_node); 1867 #endif 1868 err = -EBUSY; 1869 goto _return; 1870 } 1871 #ifdef CONFIG_PPC 1872 1873 /* Restore devdisr2 value */ 1874 iowrite32be(devdisr2, &guts_regs->devdisr2); 1875 1876 iounmap(guts_regs); 1877 of_node_put(guts_node); 1878 #endif 1879 1880 goto _return; 1881 1882 #ifdef CONFIG_PPC 1883 guts_regs: 1884 of_node_put(guts_node); 1885 guts_node: 1886 dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n", 1887 __func__); 1888 #endif 1889 } 1890 _return: 1891 return err; 1892 } 1893 1894 static int fman_init(struct fman *fman) 1895 { 1896 struct fman_cfg *cfg = NULL; 1897 int err = 0, i, count; 1898 1899 if (is_init_done(fman->cfg)) 1900 return -EINVAL; 1901 1902 fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT; 1903 1904 cfg = fman->cfg; 1905 1906 /* clear revision-dependent non existing exception */ 1907 if (fman->state->rev_info.major < 6) 1908 fman->state->exceptions &= ~FMAN_EX_BMI_DISPATCH_RAM_ECC; 1909 1910 if (fman->state->rev_info.major >= 6) 1911 fman->state->exceptions &= ~FMAN_EX_QMI_SINGLE_ECC; 1912 1913 /* clear CPG */ 1914 memset_io((void __iomem *)(fman->base_addr + CGP_OFFSET), 0, 1915 fman->state->fm_port_num_of_cg); 1916 1917 /* Save LIODN info before FMan reset 1918 * Skipping non-existent port 0 (i = 1) 1919 */ 1920 for (i = 1; i < FMAN_LIODN_TBL; i++) { 1921 u32 liodn_base; 1922 1923 fman->liodn_offset[i] = 1924 ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]); 1925 if (!IS_ENABLED(CONFIG_FSL_PAMU)) 1926 continue; 1927 liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]); 1928 if (i % 2) { 1929 /* FMDM_PLR LSB holds LIODN base for odd ports */ 1930 liodn_base &= DMA_LIODN_BASE_MASK; 1931 } else { 1932 /* FMDM_PLR MSB holds LIODN base for even ports */ 1933 liodn_base >>= DMA_LIODN_SHIFT; 1934 liodn_base &= DMA_LIODN_BASE_MASK; 1935 } 1936 fman->liodn_base[i] = liodn_base; 1937 } 1938 1939 err = fman_reset(fman); 1940 if (err) 1941 return err; 1942 1943 if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) { 1944 resume(fman->fpm_regs); 1945 /* Wait until QMI is not in halt not busy state */ 1946 count = 100; 1947 do { 1948 udelay(1); 1949 } while (((ioread32be(&fman->qmi_regs->fmqm_gs)) & 1950 QMI_GS_HALT_NOT_BUSY) && --count); 1951 if (count == 0) 1952 dev_warn(fman->dev, "%s: QMI is in halt not busy state\n", 1953 __func__); 1954 } 1955 1956 if (clear_iram(fman) != 0) 1957 return -EINVAL; 1958 1959 cfg->exceptions = fman->state->exceptions; 1960 1961 /* Init DMA Registers */ 1962 1963 err = dma_init(fman); 1964 if (err != 0) { 1965 free_init_resources(fman); 1966 return err; 1967 } 1968 1969 /* Init FPM Registers */ 1970 fpm_init(fman->fpm_regs, fman->cfg); 1971 1972 /* define common resources */ 1973 /* allocate MURAM for FIFO according to total size */ 1974 fman->fifo_offset = fman_muram_alloc(fman->muram, 1975 fman->state->total_fifo_size); 1976 if (IS_ERR_VALUE(fman->fifo_offset)) { 1977 free_init_resources(fman); 1978 dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n", 1979 __func__); 1980 return -ENOMEM; 1981 } 1982 1983 cfg->fifo_base_addr = fman->fifo_offset; 1984 cfg->total_fifo_size = fman->state->total_fifo_size; 1985 cfg->total_num_of_tasks = fman->state->total_num_of_tasks; 1986 cfg->clk_freq = fman->state->fm_clk_freq; 1987 1988 /* Init BMI Registers */ 1989 bmi_init(fman->bmi_regs, fman->cfg); 1990 1991 /* Init QMI Registers */ 1992 qmi_init(fman->qmi_regs, fman->cfg); 1993 1994 /* Init HW Parser */ 1995 hwp_init(fman->hwp_regs); 1996 1997 /* Init KeyGen */ 1998 fman->keygen = keygen_init(fman->kg_regs); 1999 if (!fman->keygen) 2000 return -EINVAL; 2001 2002 err = enable(fman, cfg); 2003 if (err != 0) 2004 return err; 2005 2006 enable_time_stamp(fman); 2007 2008 kfree(fman->cfg); 2009 fman->cfg = NULL; 2010 2011 return 0; 2012 } 2013 2014 static int fman_set_exception(struct fman *fman, 2015 enum fman_exceptions exception, bool enable) 2016 { 2017 u32 bit_mask = 0; 2018 2019 if (!is_init_done(fman->cfg)) 2020 return -EINVAL; 2021 2022 bit_mask = get_exception_flag(exception); 2023 if (bit_mask) { 2024 if (enable) 2025 fman->state->exceptions |= bit_mask; 2026 else 2027 fman->state->exceptions &= ~bit_mask; 2028 } else { 2029 dev_err(fman->dev, "%s: Undefined exception (%d)\n", 2030 __func__, exception); 2031 return -EINVAL; 2032 } 2033 2034 return set_exception(fman, exception, enable); 2035 } 2036 2037 /** 2038 * fman_register_intr 2039 * @fman: A Pointer to FMan device 2040 * @module: Calling module 2041 * @mod_id: Module id (if more than 1 exists, '0' if not) 2042 * @intr_type: Interrupt type (error/normal) selection. 2043 * @isr_cb: The interrupt service routine. 2044 * @src_arg: Argument to be passed to isr_cb. 2045 * 2046 * Used to register an event handler to be processed by FMan 2047 * 2048 * Return: 0 on success; Error code otherwise. 2049 */ 2050 void fman_register_intr(struct fman *fman, enum fman_event_modules module, 2051 u8 mod_id, enum fman_intr_type intr_type, 2052 void (*isr_cb)(void *src_arg), void *src_arg) 2053 { 2054 int event = 0; 2055 2056 event = get_module_event(module, mod_id, intr_type); 2057 WARN_ON(event >= FMAN_EV_CNT); 2058 2059 /* register in local FM structure */ 2060 fman->intr_mng[event].isr_cb = isr_cb; 2061 fman->intr_mng[event].src_handle = src_arg; 2062 } 2063 EXPORT_SYMBOL(fman_register_intr); 2064 2065 /** 2066 * fman_unregister_intr 2067 * @fman: A Pointer to FMan device 2068 * @module: Calling module 2069 * @mod_id: Module id (if more than 1 exists, '0' if not) 2070 * @intr_type: Interrupt type (error/normal) selection. 2071 * 2072 * Used to unregister an event handler to be processed by FMan 2073 * 2074 * Return: 0 on success; Error code otherwise. 2075 */ 2076 void fman_unregister_intr(struct fman *fman, enum fman_event_modules module, 2077 u8 mod_id, enum fman_intr_type intr_type) 2078 { 2079 int event = 0; 2080 2081 event = get_module_event(module, mod_id, intr_type); 2082 WARN_ON(event >= FMAN_EV_CNT); 2083 2084 fman->intr_mng[event].isr_cb = NULL; 2085 fman->intr_mng[event].src_handle = NULL; 2086 } 2087 EXPORT_SYMBOL(fman_unregister_intr); 2088 2089 /** 2090 * fman_set_port_params 2091 * @fman: A Pointer to FMan device 2092 * @port_params: Port parameters 2093 * 2094 * Used by FMan Port to pass parameters to the FMan 2095 * 2096 * Return: 0 on success; Error code otherwise. 2097 */ 2098 int fman_set_port_params(struct fman *fman, 2099 struct fman_port_init_params *port_params) 2100 { 2101 int err; 2102 unsigned long flags; 2103 u8 port_id = port_params->port_id, mac_id; 2104 2105 spin_lock_irqsave(&fman->spinlock, flags); 2106 2107 err = set_num_of_tasks(fman, port_params->port_id, 2108 &port_params->num_of_tasks, 2109 &port_params->num_of_extra_tasks); 2110 if (err) 2111 goto return_err; 2112 2113 /* TX Ports */ 2114 if (port_params->port_type != FMAN_PORT_TYPE_RX) { 2115 u32 enq_th, deq_th, reg; 2116 2117 /* update qmi ENQ/DEQ threshold */ 2118 fman->state->accumulated_num_of_deq_tnums += 2119 port_params->deq_pipeline_depth; 2120 enq_th = (ioread32be(&fman->qmi_regs->fmqm_gc) & 2121 QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT; 2122 /* if enq_th is too big, we reduce it to the max value 2123 * that is still 0 2124 */ 2125 if (enq_th >= (fman->state->qmi_max_num_of_tnums - 2126 fman->state->accumulated_num_of_deq_tnums)) { 2127 enq_th = 2128 fman->state->qmi_max_num_of_tnums - 2129 fman->state->accumulated_num_of_deq_tnums - 1; 2130 2131 reg = ioread32be(&fman->qmi_regs->fmqm_gc); 2132 reg &= ~QMI_CFG_ENQ_MASK; 2133 reg |= (enq_th << QMI_CFG_ENQ_SHIFT); 2134 iowrite32be(reg, &fman->qmi_regs->fmqm_gc); 2135 } 2136 2137 deq_th = ioread32be(&fman->qmi_regs->fmqm_gc) & 2138 QMI_CFG_DEQ_MASK; 2139 /* if deq_th is too small, we enlarge it to the min 2140 * value that is still 0. 2141 * depTh may not be larger than 63 2142 * (fman->state->qmi_max_num_of_tnums-1). 2143 */ 2144 if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) && 2145 (deq_th < fman->state->qmi_max_num_of_tnums - 1)) { 2146 deq_th = fman->state->accumulated_num_of_deq_tnums + 1; 2147 reg = ioread32be(&fman->qmi_regs->fmqm_gc); 2148 reg &= ~QMI_CFG_DEQ_MASK; 2149 reg |= deq_th; 2150 iowrite32be(reg, &fman->qmi_regs->fmqm_gc); 2151 } 2152 } 2153 2154 err = set_size_of_fifo(fman, port_params->port_id, 2155 &port_params->size_of_fifo, 2156 &port_params->extra_size_of_fifo); 2157 if (err) 2158 goto return_err; 2159 2160 err = set_num_of_open_dmas(fman, port_params->port_id, 2161 &port_params->num_of_open_dmas, 2162 &port_params->num_of_extra_open_dmas); 2163 if (err) 2164 goto return_err; 2165 2166 set_port_liodn(fman, port_id, fman->liodn_base[port_id], 2167 fman->liodn_offset[port_id]); 2168 2169 if (fman->state->rev_info.major < 6) 2170 set_port_order_restoration(fman->fpm_regs, port_id); 2171 2172 mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id); 2173 2174 if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) { 2175 fman->state->port_mfl[mac_id] = port_params->max_frame_length; 2176 } else { 2177 dev_warn(fman->dev, "%s: Port (%d) max_frame_length is smaller than MAC (%d) current MTU\n", 2178 __func__, port_id, mac_id); 2179 err = -EINVAL; 2180 goto return_err; 2181 } 2182 2183 spin_unlock_irqrestore(&fman->spinlock, flags); 2184 2185 return 0; 2186 2187 return_err: 2188 spin_unlock_irqrestore(&fman->spinlock, flags); 2189 return err; 2190 } 2191 EXPORT_SYMBOL(fman_set_port_params); 2192 2193 /** 2194 * fman_reset_mac 2195 * @fman: A Pointer to FMan device 2196 * @mac_id: MAC id to be reset 2197 * 2198 * Reset a specific MAC 2199 * 2200 * Return: 0 on success; Error code otherwise. 2201 */ 2202 int fman_reset_mac(struct fman *fman, u8 mac_id) 2203 { 2204 struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; 2205 u32 msk, timeout = 100; 2206 2207 if (fman->state->rev_info.major >= 6) { 2208 dev_err(fman->dev, "%s: FMan MAC reset no available for FMan V3!\n", 2209 __func__); 2210 return -EINVAL; 2211 } 2212 2213 /* Get the relevant bit mask */ 2214 switch (mac_id) { 2215 case 0: 2216 msk = FPM_RSTC_MAC0_RESET; 2217 break; 2218 case 1: 2219 msk = FPM_RSTC_MAC1_RESET; 2220 break; 2221 case 2: 2222 msk = FPM_RSTC_MAC2_RESET; 2223 break; 2224 case 3: 2225 msk = FPM_RSTC_MAC3_RESET; 2226 break; 2227 case 4: 2228 msk = FPM_RSTC_MAC4_RESET; 2229 break; 2230 case 5: 2231 msk = FPM_RSTC_MAC5_RESET; 2232 break; 2233 case 6: 2234 msk = FPM_RSTC_MAC6_RESET; 2235 break; 2236 case 7: 2237 msk = FPM_RSTC_MAC7_RESET; 2238 break; 2239 case 8: 2240 msk = FPM_RSTC_MAC8_RESET; 2241 break; 2242 case 9: 2243 msk = FPM_RSTC_MAC9_RESET; 2244 break; 2245 default: 2246 dev_warn(fman->dev, "%s: Illegal MAC Id [%d]\n", 2247 __func__, mac_id); 2248 return -EINVAL; 2249 } 2250 2251 /* reset */ 2252 iowrite32be(msk, &fpm_rg->fm_rstc); 2253 while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout) 2254 udelay(10); 2255 2256 if (!timeout) 2257 return -EIO; 2258 2259 return 0; 2260 } 2261 EXPORT_SYMBOL(fman_reset_mac); 2262 2263 /** 2264 * fman_set_mac_max_frame 2265 * @fman: A Pointer to FMan device 2266 * @mac_id: MAC id 2267 * @mfl: Maximum frame length 2268 * 2269 * Set maximum frame length of specific MAC in FMan driver 2270 * 2271 * Return: 0 on success; Error code otherwise. 2272 */ 2273 int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl) 2274 { 2275 /* if port is already initialized, check that MaxFrameLength is smaller 2276 * or equal to the port's max 2277 */ 2278 if ((!fman->state->port_mfl[mac_id]) || 2279 (mfl <= fman->state->port_mfl[mac_id])) { 2280 fman->state->mac_mfl[mac_id] = mfl; 2281 } else { 2282 dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n", 2283 __func__); 2284 return -EINVAL; 2285 } 2286 return 0; 2287 } 2288 EXPORT_SYMBOL(fman_set_mac_max_frame); 2289 2290 /** 2291 * fman_get_clock_freq 2292 * @fman: A Pointer to FMan device 2293 * 2294 * Get FMan clock frequency 2295 * 2296 * Return: FMan clock frequency 2297 */ 2298 u16 fman_get_clock_freq(struct fman *fman) 2299 { 2300 return fman->state->fm_clk_freq; 2301 } 2302 2303 /** 2304 * fman_get_bmi_max_fifo_size 2305 * @fman: A Pointer to FMan device 2306 * 2307 * Get FMan maximum FIFO size 2308 * 2309 * Return: FMan Maximum FIFO size 2310 */ 2311 u32 fman_get_bmi_max_fifo_size(struct fman *fman) 2312 { 2313 return fman->state->bmi_max_fifo_size; 2314 } 2315 EXPORT_SYMBOL(fman_get_bmi_max_fifo_size); 2316 2317 /** 2318 * fman_get_revision 2319 * @fman: - Pointer to the FMan module 2320 * @rev_info: - A structure of revision information parameters. 2321 * 2322 * Returns the FM revision 2323 * 2324 * Allowed only following fman_init(). 2325 * 2326 * Return: 0 on success; Error code otherwise. 2327 */ 2328 void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info) 2329 { 2330 u32 tmp; 2331 2332 tmp = ioread32be(&fman->fpm_regs->fm_ip_rev_1); 2333 rev_info->major = (u8)((tmp & FPM_REV1_MAJOR_MASK) >> 2334 FPM_REV1_MAJOR_SHIFT); 2335 rev_info->minor = tmp & FPM_REV1_MINOR_MASK; 2336 } 2337 EXPORT_SYMBOL(fman_get_revision); 2338 2339 /** 2340 * fman_get_qman_channel_id 2341 * @fman: A Pointer to FMan device 2342 * @port_id: Port id 2343 * 2344 * Get QMan channel ID associated to the Port id 2345 * 2346 * Return: QMan channel ID 2347 */ 2348 u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id) 2349 { 2350 int i; 2351 2352 if (fman->state->rev_info.major >= 6) { 2353 static const u32 port_ids[] = { 2354 0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b, 2355 0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7 2356 }; 2357 2358 for (i = 0; i < fman->state->num_of_qman_channels; i++) { 2359 if (port_ids[i] == port_id) 2360 break; 2361 } 2362 } else { 2363 static const u32 port_ids[] = { 2364 0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1, 2365 0x2, 0x3, 0x4, 0x5, 0x7, 0x7 2366 }; 2367 2368 for (i = 0; i < fman->state->num_of_qman_channels; i++) { 2369 if (port_ids[i] == port_id) 2370 break; 2371 } 2372 } 2373 2374 if (i == fman->state->num_of_qman_channels) 2375 return 0; 2376 2377 return fman->state->qman_channel_base + i; 2378 } 2379 EXPORT_SYMBOL(fman_get_qman_channel_id); 2380 2381 /** 2382 * fman_get_mem_region 2383 * @fman: A Pointer to FMan device 2384 * 2385 * Get FMan memory region 2386 * 2387 * Return: A structure with FMan memory region information 2388 */ 2389 struct resource *fman_get_mem_region(struct fman *fman) 2390 { 2391 return fman->state->res; 2392 } 2393 EXPORT_SYMBOL(fman_get_mem_region); 2394 2395 /* Bootargs defines */ 2396 /* Extra headroom for RX buffers - Default, min and max */ 2397 #define FSL_FM_RX_EXTRA_HEADROOM 64 2398 #define FSL_FM_RX_EXTRA_HEADROOM_MIN 16 2399 #define FSL_FM_RX_EXTRA_HEADROOM_MAX 384 2400 2401 /* Maximum frame length */ 2402 #define FSL_FM_MAX_FRAME_SIZE 1522 2403 #define FSL_FM_MAX_POSSIBLE_FRAME_SIZE 9600 2404 #define FSL_FM_MIN_POSSIBLE_FRAME_SIZE 64 2405 2406 /* Extra headroom for Rx buffers. 2407 * FMan is instructed to allocate, on the Rx path, this amount of 2408 * space at the beginning of a data buffer, beside the DPA private 2409 * data area and the IC fields. 2410 * Does not impact Tx buffer layout. 2411 * Configurable from bootargs. 64 by default, it's needed on 2412 * particular forwarding scenarios that add extra headers to the 2413 * forwarded frame. 2414 */ 2415 static int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM; 2416 module_param(fsl_fm_rx_extra_headroom, int, 0); 2417 MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers"); 2418 2419 /* Max frame size, across all interfaces. 2420 * Configurable from bootargs, to avoid allocating oversized (socket) 2421 * buffers when not using jumbo frames. 2422 * Must be large enough to accommodate the network MTU, but small enough 2423 * to avoid wasting skb memory. 2424 */ 2425 static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE; 2426 module_param(fsl_fm_max_frm, int, 0); 2427 MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces"); 2428 2429 /** 2430 * fman_get_max_frm 2431 * 2432 * Return: Max frame length configured in the FM driver 2433 */ 2434 u16 fman_get_max_frm(void) 2435 { 2436 static bool fm_check_mfl; 2437 2438 if (!fm_check_mfl) { 2439 if (fsl_fm_max_frm > FSL_FM_MAX_POSSIBLE_FRAME_SIZE || 2440 fsl_fm_max_frm < FSL_FM_MIN_POSSIBLE_FRAME_SIZE) { 2441 pr_warn("Invalid fsl_fm_max_frm value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n", 2442 fsl_fm_max_frm, 2443 FSL_FM_MIN_POSSIBLE_FRAME_SIZE, 2444 FSL_FM_MAX_POSSIBLE_FRAME_SIZE, 2445 FSL_FM_MAX_FRAME_SIZE); 2446 fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE; 2447 } 2448 fm_check_mfl = true; 2449 } 2450 2451 return fsl_fm_max_frm; 2452 } 2453 EXPORT_SYMBOL(fman_get_max_frm); 2454 2455 /** 2456 * fman_get_rx_extra_headroom 2457 * 2458 * Return: Extra headroom size configured in the FM driver 2459 */ 2460 int fman_get_rx_extra_headroom(void) 2461 { 2462 static bool fm_check_rx_extra_headroom; 2463 2464 if (!fm_check_rx_extra_headroom) { 2465 if (fsl_fm_rx_extra_headroom > FSL_FM_RX_EXTRA_HEADROOM_MAX || 2466 fsl_fm_rx_extra_headroom < FSL_FM_RX_EXTRA_HEADROOM_MIN) { 2467 pr_warn("Invalid fsl_fm_rx_extra_headroom value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n", 2468 fsl_fm_rx_extra_headroom, 2469 FSL_FM_RX_EXTRA_HEADROOM_MIN, 2470 FSL_FM_RX_EXTRA_HEADROOM_MAX, 2471 FSL_FM_RX_EXTRA_HEADROOM); 2472 fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM; 2473 } 2474 2475 fm_check_rx_extra_headroom = true; 2476 fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16); 2477 } 2478 2479 return fsl_fm_rx_extra_headroom; 2480 } 2481 EXPORT_SYMBOL(fman_get_rx_extra_headroom); 2482 2483 /** 2484 * fman_bind 2485 * @fm_dev: FMan OF device pointer 2486 * 2487 * Bind to a specific FMan device. 2488 * 2489 * Allowed only after the port was created. 2490 * 2491 * Return: A pointer to the FMan device 2492 */ 2493 struct fman *fman_bind(struct device *fm_dev) 2494 { 2495 return (struct fman *)(dev_get_drvdata(get_device(fm_dev))); 2496 } 2497 EXPORT_SYMBOL(fman_bind); 2498 2499 #ifdef CONFIG_DPAA_ERRATUM_A050385 2500 bool fman_has_errata_a050385(void) 2501 { 2502 return fman_has_err_a050385; 2503 } 2504 EXPORT_SYMBOL(fman_has_errata_a050385); 2505 #endif 2506 2507 static irqreturn_t fman_err_irq(int irq, void *handle) 2508 { 2509 struct fman *fman = (struct fman *)handle; 2510 u32 pending; 2511 struct fman_fpm_regs __iomem *fpm_rg; 2512 irqreturn_t single_ret, ret = IRQ_NONE; 2513 2514 if (!is_init_done(fman->cfg)) 2515 return IRQ_NONE; 2516 2517 fpm_rg = fman->fpm_regs; 2518 2519 /* error interrupts */ 2520 pending = ioread32be(&fpm_rg->fm_epi); 2521 if (!pending) 2522 return IRQ_NONE; 2523 2524 if (pending & ERR_INTR_EN_BMI) { 2525 single_ret = bmi_err_event(fman); 2526 if (single_ret == IRQ_HANDLED) 2527 ret = IRQ_HANDLED; 2528 } 2529 if (pending & ERR_INTR_EN_QMI) { 2530 single_ret = qmi_err_event(fman); 2531 if (single_ret == IRQ_HANDLED) 2532 ret = IRQ_HANDLED; 2533 } 2534 if (pending & ERR_INTR_EN_FPM) { 2535 single_ret = fpm_err_event(fman); 2536 if (single_ret == IRQ_HANDLED) 2537 ret = IRQ_HANDLED; 2538 } 2539 if (pending & ERR_INTR_EN_DMA) { 2540 single_ret = dma_err_event(fman); 2541 if (single_ret == IRQ_HANDLED) 2542 ret = IRQ_HANDLED; 2543 } 2544 if (pending & ERR_INTR_EN_MURAM) { 2545 single_ret = muram_err_intr(fman); 2546 if (single_ret == IRQ_HANDLED) 2547 ret = IRQ_HANDLED; 2548 } 2549 2550 /* MAC error interrupts */ 2551 if (pending & ERR_INTR_EN_MAC0) { 2552 single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0); 2553 if (single_ret == IRQ_HANDLED) 2554 ret = IRQ_HANDLED; 2555 } 2556 if (pending & ERR_INTR_EN_MAC1) { 2557 single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1); 2558 if (single_ret == IRQ_HANDLED) 2559 ret = IRQ_HANDLED; 2560 } 2561 if (pending & ERR_INTR_EN_MAC2) { 2562 single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2); 2563 if (single_ret == IRQ_HANDLED) 2564 ret = IRQ_HANDLED; 2565 } 2566 if (pending & ERR_INTR_EN_MAC3) { 2567 single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3); 2568 if (single_ret == IRQ_HANDLED) 2569 ret = IRQ_HANDLED; 2570 } 2571 if (pending & ERR_INTR_EN_MAC4) { 2572 single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4); 2573 if (single_ret == IRQ_HANDLED) 2574 ret = IRQ_HANDLED; 2575 } 2576 if (pending & ERR_INTR_EN_MAC5) { 2577 single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5); 2578 if (single_ret == IRQ_HANDLED) 2579 ret = IRQ_HANDLED; 2580 } 2581 if (pending & ERR_INTR_EN_MAC6) { 2582 single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6); 2583 if (single_ret == IRQ_HANDLED) 2584 ret = IRQ_HANDLED; 2585 } 2586 if (pending & ERR_INTR_EN_MAC7) { 2587 single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7); 2588 if (single_ret == IRQ_HANDLED) 2589 ret = IRQ_HANDLED; 2590 } 2591 if (pending & ERR_INTR_EN_MAC8) { 2592 single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8); 2593 if (single_ret == IRQ_HANDLED) 2594 ret = IRQ_HANDLED; 2595 } 2596 if (pending & ERR_INTR_EN_MAC9) { 2597 single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9); 2598 if (single_ret == IRQ_HANDLED) 2599 ret = IRQ_HANDLED; 2600 } 2601 2602 return ret; 2603 } 2604 2605 static irqreturn_t fman_irq(int irq, void *handle) 2606 { 2607 struct fman *fman = (struct fman *)handle; 2608 u32 pending; 2609 struct fman_fpm_regs __iomem *fpm_rg; 2610 irqreturn_t single_ret, ret = IRQ_NONE; 2611 2612 if (!is_init_done(fman->cfg)) 2613 return IRQ_NONE; 2614 2615 fpm_rg = fman->fpm_regs; 2616 2617 /* normal interrupts */ 2618 pending = ioread32be(&fpm_rg->fm_npi); 2619 if (!pending) 2620 return IRQ_NONE; 2621 2622 if (pending & INTR_EN_QMI) { 2623 single_ret = qmi_event(fman); 2624 if (single_ret == IRQ_HANDLED) 2625 ret = IRQ_HANDLED; 2626 } 2627 2628 /* MAC interrupts */ 2629 if (pending & INTR_EN_MAC0) { 2630 single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 0); 2631 if (single_ret == IRQ_HANDLED) 2632 ret = IRQ_HANDLED; 2633 } 2634 if (pending & INTR_EN_MAC1) { 2635 single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 1); 2636 if (single_ret == IRQ_HANDLED) 2637 ret = IRQ_HANDLED; 2638 } 2639 if (pending & INTR_EN_MAC2) { 2640 single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 2); 2641 if (single_ret == IRQ_HANDLED) 2642 ret = IRQ_HANDLED; 2643 } 2644 if (pending & INTR_EN_MAC3) { 2645 single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 3); 2646 if (single_ret == IRQ_HANDLED) 2647 ret = IRQ_HANDLED; 2648 } 2649 if (pending & INTR_EN_MAC4) { 2650 single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 4); 2651 if (single_ret == IRQ_HANDLED) 2652 ret = IRQ_HANDLED; 2653 } 2654 if (pending & INTR_EN_MAC5) { 2655 single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 5); 2656 if (single_ret == IRQ_HANDLED) 2657 ret = IRQ_HANDLED; 2658 } 2659 if (pending & INTR_EN_MAC6) { 2660 single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 6); 2661 if (single_ret == IRQ_HANDLED) 2662 ret = IRQ_HANDLED; 2663 } 2664 if (pending & INTR_EN_MAC7) { 2665 single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 7); 2666 if (single_ret == IRQ_HANDLED) 2667 ret = IRQ_HANDLED; 2668 } 2669 if (pending & INTR_EN_MAC8) { 2670 single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 8); 2671 if (single_ret == IRQ_HANDLED) 2672 ret = IRQ_HANDLED; 2673 } 2674 if (pending & INTR_EN_MAC9) { 2675 single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 9); 2676 if (single_ret == IRQ_HANDLED) 2677 ret = IRQ_HANDLED; 2678 } 2679 2680 return ret; 2681 } 2682 2683 static const struct of_device_id fman_muram_match[] = { 2684 { 2685 .compatible = "fsl,fman-muram"}, 2686 {} 2687 }; 2688 MODULE_DEVICE_TABLE(of, fman_muram_match); 2689 2690 static struct fman *read_dts_node(struct platform_device *of_dev) 2691 { 2692 struct fman *fman; 2693 struct device_node *fm_node, *muram_node; 2694 struct resource *res; 2695 u32 val, range[2]; 2696 int err, irq; 2697 struct clk *clk; 2698 u32 clk_rate; 2699 phys_addr_t phys_base_addr; 2700 resource_size_t mem_size; 2701 2702 fman = kzalloc(sizeof(*fman), GFP_KERNEL); 2703 if (!fman) 2704 return ERR_PTR(-ENOMEM); 2705 2706 fm_node = of_node_get(of_dev->dev.of_node); 2707 2708 err = of_property_read_u32(fm_node, "cell-index", &val); 2709 if (err) { 2710 dev_err(&of_dev->dev, "%s: failed to read cell-index for %pOF\n", 2711 __func__, fm_node); 2712 goto fman_node_put; 2713 } 2714 fman->dts_params.id = (u8)val; 2715 2716 /* Get the FM interrupt */ 2717 err = platform_get_irq(of_dev, 0); 2718 if (err < 0) 2719 goto fman_node_put; 2720 irq = err; 2721 2722 /* Get the FM error interrupt */ 2723 err = platform_get_irq(of_dev, 1); 2724 if (err < 0) 2725 goto fman_node_put; 2726 fman->dts_params.err_irq = err; 2727 2728 /* Get the FM address */ 2729 res = platform_get_resource(of_dev, IORESOURCE_MEM, 0); 2730 if (!res) { 2731 err = -EINVAL; 2732 dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n", 2733 __func__); 2734 goto fman_node_put; 2735 } 2736 2737 phys_base_addr = res->start; 2738 mem_size = resource_size(res); 2739 2740 clk = of_clk_get(fm_node, 0); 2741 if (IS_ERR(clk)) { 2742 err = PTR_ERR(clk); 2743 dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n", 2744 __func__, fman->dts_params.id); 2745 goto fman_node_put; 2746 } 2747 2748 clk_rate = clk_get_rate(clk); 2749 if (!clk_rate) { 2750 err = -EINVAL; 2751 dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n", 2752 __func__, fman->dts_params.id); 2753 goto fman_node_put; 2754 } 2755 /* Rounding to MHz */ 2756 fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000); 2757 2758 err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range", 2759 &range[0], 2); 2760 if (err) { 2761 dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %pOF\n", 2762 __func__, fm_node); 2763 goto fman_node_put; 2764 } 2765 fman->dts_params.qman_channel_base = range[0]; 2766 fman->dts_params.num_of_qman_channels = range[1]; 2767 2768 /* Get the MURAM base address and size */ 2769 muram_node = of_find_matching_node(fm_node, fman_muram_match); 2770 if (!muram_node) { 2771 err = -EINVAL; 2772 dev_err(&of_dev->dev, "%s: could not find MURAM node\n", 2773 __func__); 2774 goto fman_free; 2775 } 2776 2777 err = of_address_to_resource(muram_node, 0, 2778 &fman->dts_params.muram_res); 2779 if (err) { 2780 of_node_put(muram_node); 2781 dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n", 2782 __func__, err); 2783 goto fman_free; 2784 } 2785 2786 of_node_put(muram_node); 2787 2788 err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED, 2789 "fman", fman); 2790 if (err < 0) { 2791 dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n", 2792 __func__, irq, err); 2793 goto fman_free; 2794 } 2795 2796 if (fman->dts_params.err_irq != 0) { 2797 err = devm_request_irq(&of_dev->dev, fman->dts_params.err_irq, 2798 fman_err_irq, IRQF_SHARED, 2799 "fman-err", fman); 2800 if (err < 0) { 2801 dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n", 2802 __func__, fman->dts_params.err_irq, err); 2803 goto fman_free; 2804 } 2805 } 2806 2807 fman->dts_params.res = 2808 devm_request_mem_region(&of_dev->dev, phys_base_addr, 2809 mem_size, "fman"); 2810 if (!fman->dts_params.res) { 2811 err = -EBUSY; 2812 dev_err(&of_dev->dev, "%s: request_mem_region() failed\n", 2813 __func__); 2814 goto fman_free; 2815 } 2816 2817 fman->dts_params.base_addr = 2818 devm_ioremap(&of_dev->dev, phys_base_addr, mem_size); 2819 if (!fman->dts_params.base_addr) { 2820 err = -ENOMEM; 2821 dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__); 2822 goto fman_free; 2823 } 2824 2825 fman->dev = &of_dev->dev; 2826 2827 err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev); 2828 if (err) { 2829 dev_err(&of_dev->dev, "%s: of_platform_populate() failed\n", 2830 __func__); 2831 goto fman_free; 2832 } 2833 2834 #ifdef CONFIG_DPAA_ERRATUM_A050385 2835 fman_has_err_a050385 = 2836 of_property_read_bool(fm_node, "fsl,erratum-a050385"); 2837 #endif 2838 2839 return fman; 2840 2841 fman_node_put: 2842 of_node_put(fm_node); 2843 fman_free: 2844 kfree(fman); 2845 return ERR_PTR(err); 2846 } 2847 2848 static int fman_probe(struct platform_device *of_dev) 2849 { 2850 struct fman *fman; 2851 struct device *dev; 2852 int err; 2853 2854 dev = &of_dev->dev; 2855 2856 fman = read_dts_node(of_dev); 2857 if (IS_ERR(fman)) 2858 return PTR_ERR(fman); 2859 2860 err = fman_config(fman); 2861 if (err) { 2862 dev_err(dev, "%s: FMan config failed\n", __func__); 2863 return -EINVAL; 2864 } 2865 2866 if (fman_init(fman) != 0) { 2867 dev_err(dev, "%s: FMan init failed\n", __func__); 2868 return -EINVAL; 2869 } 2870 2871 if (fman->dts_params.err_irq == 0) { 2872 fman_set_exception(fman, FMAN_EX_DMA_BUS_ERROR, false); 2873 fman_set_exception(fman, FMAN_EX_DMA_READ_ECC, false); 2874 fman_set_exception(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC, false); 2875 fman_set_exception(fman, FMAN_EX_DMA_FM_WRITE_ECC, false); 2876 fman_set_exception(fman, FMAN_EX_DMA_SINGLE_PORT_ECC, false); 2877 fman_set_exception(fman, FMAN_EX_FPM_STALL_ON_TASKS, false); 2878 fman_set_exception(fman, FMAN_EX_FPM_SINGLE_ECC, false); 2879 fman_set_exception(fman, FMAN_EX_FPM_DOUBLE_ECC, false); 2880 fman_set_exception(fman, FMAN_EX_QMI_SINGLE_ECC, false); 2881 fman_set_exception(fman, FMAN_EX_QMI_DOUBLE_ECC, false); 2882 fman_set_exception(fman, 2883 FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID, false); 2884 fman_set_exception(fman, FMAN_EX_BMI_LIST_RAM_ECC, false); 2885 fman_set_exception(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC, 2886 false); 2887 fman_set_exception(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC, false); 2888 fman_set_exception(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC, false); 2889 } 2890 2891 dev_set_drvdata(dev, fman); 2892 2893 dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id); 2894 2895 return 0; 2896 } 2897 2898 static const struct of_device_id fman_match[] = { 2899 { 2900 .compatible = "fsl,fman"}, 2901 {} 2902 }; 2903 2904 MODULE_DEVICE_TABLE(of, fman_match); 2905 2906 static struct platform_driver fman_driver = { 2907 .driver = { 2908 .name = "fsl-fman", 2909 .of_match_table = fman_match, 2910 }, 2911 .probe = fman_probe, 2912 }; 2913 2914 static int __init fman_load(void) 2915 { 2916 int err; 2917 2918 pr_debug("FSL DPAA FMan driver\n"); 2919 2920 err = platform_driver_register(&fman_driver); 2921 if (err < 0) 2922 pr_err("Error, platform_driver_register() = %d\n", err); 2923 2924 return err; 2925 } 2926 module_init(fman_load); 2927 2928 static void __exit fman_unload(void) 2929 { 2930 platform_driver_unregister(&fman_driver); 2931 } 2932 module_exit(fman_unload); 2933 2934 MODULE_LICENSE("Dual BSD/GPL"); 2935 MODULE_DESCRIPTION("Freescale DPAA Frame Manager driver"); 2936