1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (c) 2017 Hisilicon Limited. 4 */ 5 6 #include <linux/sched/clock.h> 7 #include "hisi_sas.h" 8 #define DRV_NAME "hisi_sas_v3_hw" 9 10 /* global registers need init */ 11 #define DLVRY_QUEUE_ENABLE 0x0 12 #define IOST_BASE_ADDR_LO 0x8 13 #define IOST_BASE_ADDR_HI 0xc 14 #define ITCT_BASE_ADDR_LO 0x10 15 #define ITCT_BASE_ADDR_HI 0x14 16 #define IO_BROKEN_MSG_ADDR_LO 0x18 17 #define IO_BROKEN_MSG_ADDR_HI 0x1c 18 #define PHY_CONTEXT 0x20 19 #define PHY_STATE 0x24 20 #define PHY_PORT_NUM_MA 0x28 21 #define PHY_CONN_RATE 0x30 22 #define ITCT_CLR 0x44 23 #define ITCT_CLR_EN_OFF 16 24 #define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF) 25 #define ITCT_DEV_OFF 0 26 #define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF) 27 #define SAS_AXI_USER3 0x50 28 #define IO_SATA_BROKEN_MSG_ADDR_LO 0x58 29 #define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c 30 #define SATA_INITI_D2H_STORE_ADDR_LO 0x60 31 #define SATA_INITI_D2H_STORE_ADDR_HI 0x64 32 #define CFG_MAX_TAG 0x68 33 #define TRANS_LOCK_ICT_TIME 0X70 34 #define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84 35 #define HGC_SAS_TXFAIL_RETRY_CTRL 0x88 36 #define HGC_GET_ITV_TIME 0x90 37 #define DEVICE_MSG_WORK_MODE 0x94 38 #define OPENA_WT_CONTI_TIME 0x9c 39 #define I_T_NEXUS_LOSS_TIME 0xa0 40 #define MAX_CON_TIME_LIMIT_TIME 0xa4 41 #define BUS_INACTIVE_LIMIT_TIME 0xa8 42 #define REJECT_TO_OPEN_LIMIT_TIME 0xac 43 #define CQ_INT_CONVERGE_EN 0xb0 44 #define CFG_AGING_TIME 0xbc 45 #define HGC_DFX_CFG2 0xc0 46 #define CFG_ICT_TIMER_STEP_TRSH 0xc8 47 #define CFG_ABT_SET_QUERY_IPTT 0xd4 48 #define CFG_SET_ABORTED_IPTT_OFF 0 49 #define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF) 50 #define CFG_SET_ABORTED_EN_OFF 12 51 #define CFG_ABT_SET_IPTT_DONE 0xd8 52 #define CFG_ABT_SET_IPTT_DONE_OFF 0 53 #define HGC_IOMB_PROC1_STATUS 0x104 54 #define HGC_LM_DFX_STATUS2 0x128 55 #define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0 56 #define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \ 57 
HGC_LM_DFX_STATUS2_IOSTLIST_OFF) 58 #define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12 59 #define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \ 60 HGC_LM_DFX_STATUS2_ITCTLIST_OFF) 61 #define HGC_CQE_ECC_ADDR 0x13c 62 #define HGC_CQE_ECC_1B_ADDR_OFF 0 63 #define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF) 64 #define HGC_CQE_ECC_MB_ADDR_OFF 8 65 #define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF) 66 #define HGC_IOST_ECC_ADDR 0x140 67 #define HGC_IOST_ECC_1B_ADDR_OFF 0 68 #define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF) 69 #define HGC_IOST_ECC_MB_ADDR_OFF 16 70 #define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF) 71 #define HGC_DQE_ECC_ADDR 0x144 72 #define HGC_DQE_ECC_1B_ADDR_OFF 0 73 #define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF) 74 #define HGC_DQE_ECC_MB_ADDR_OFF 16 75 #define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF) 76 #define CHNL_INT_STATUS 0x148 77 #define TAB_DFX 0x14c 78 #define HGC_ITCT_ECC_ADDR 0x150 79 #define HGC_ITCT_ECC_1B_ADDR_OFF 0 80 #define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \ 81 HGC_ITCT_ECC_1B_ADDR_OFF) 82 #define HGC_ITCT_ECC_MB_ADDR_OFF 16 83 #define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \ 84 HGC_ITCT_ECC_MB_ADDR_OFF) 85 #define HGC_AXI_FIFO_ERR_INFO 0x154 86 #define AXI_ERR_INFO_OFF 0 87 #define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF) 88 #define FIFO_ERR_INFO_OFF 8 89 #define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF) 90 #define TAB_RD_TYPE 0x15c 91 #define INT_COAL_EN 0x19c 92 #define OQ_INT_COAL_TIME 0x1a0 93 #define OQ_INT_COAL_CNT 0x1a4 94 #define ENT_INT_COAL_TIME 0x1a8 95 #define ENT_INT_COAL_CNT 0x1ac 96 #define OQ_INT_SRC 0x1b0 97 #define OQ_INT_SRC_MSK 0x1b4 98 #define ENT_INT_SRC1 0x1b8 99 #define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0 100 #define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF) 101 #define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8 102 #define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF) 103 #define 
ENT_INT_SRC2 0x1bc 104 #define ENT_INT_SRC3 0x1c0 105 #define ENT_INT_SRC3_WP_DEPTH_OFF 8 106 #define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9 107 #define ENT_INT_SRC3_RP_DEPTH_OFF 10 108 #define ENT_INT_SRC3_AXI_OFF 11 109 #define ENT_INT_SRC3_FIFO_OFF 12 110 #define ENT_INT_SRC3_LM_OFF 14 111 #define ENT_INT_SRC3_ITC_INT_OFF 15 112 #define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF) 113 #define ENT_INT_SRC3_ABT_OFF 16 114 #define ENT_INT_SRC3_DQE_POISON_OFF 18 115 #define ENT_INT_SRC3_IOST_POISON_OFF 19 116 #define ENT_INT_SRC3_ITCT_POISON_OFF 20 117 #define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF 21 118 #define ENT_INT_SRC_MSK1 0x1c4 119 #define ENT_INT_SRC_MSK2 0x1c8 120 #define ENT_INT_SRC_MSK3 0x1cc 121 #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31 122 #define CHNL_PHYUPDOWN_INT_MSK 0x1d0 123 #define CHNL_ENT_INT_MSK 0x1d4 124 #define HGC_COM_INT_MSK 0x1d8 125 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF) 126 #define SAS_ECC_INTR 0x1e8 127 #define SAS_ECC_INTR_DQE_ECC_1B_OFF 0 128 #define SAS_ECC_INTR_DQE_ECC_MB_OFF 1 129 #define SAS_ECC_INTR_IOST_ECC_1B_OFF 2 130 #define SAS_ECC_INTR_IOST_ECC_MB_OFF 3 131 #define SAS_ECC_INTR_ITCT_ECC_1B_OFF 4 132 #define SAS_ECC_INTR_ITCT_ECC_MB_OFF 5 133 #define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 6 134 #define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 7 135 #define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 8 136 #define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 9 137 #define SAS_ECC_INTR_CQE_ECC_1B_OFF 10 138 #define SAS_ECC_INTR_CQE_ECC_MB_OFF 11 139 #define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 12 140 #define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 13 141 #define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 14 142 #define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 15 143 #define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 16 144 #define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 17 145 #define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 18 146 #define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 19 147 #define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF 20 148 #define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF 21 
#define SAS_ECC_INTR_MSK 0x1ec
#define HGC_ERR_STAT_EN 0x238
#define CQE_SEND_CNT 0x248
#define DLVRY_Q_0_BASE_ADDR_LO 0x260
#define DLVRY_Q_0_BASE_ADDR_HI 0x264
#define DLVRY_Q_0_DEPTH 0x268
#define DLVRY_Q_0_WR_PTR 0x26c
#define DLVRY_Q_0_RD_PTR 0x270
#define HYPER_STREAM_ID_EN_CFG 0xc80
#define OQ0_INT_SRC_MSK 0xc90
#define COMPL_Q_0_BASE_ADDR_LO 0x4e0
#define COMPL_Q_0_BASE_ADDR_HI 0x4e4
#define COMPL_Q_0_DEPTH 0x4e8
#define COMPL_Q_0_WR_PTR 0x4ec
#define COMPL_Q_0_RD_PTR 0x4f0
#define HGC_RXM_DFX_STATUS14 0xae8
#define HGC_RXM_DFX_STATUS14_MEM0_OFF 0
#define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \
				       HGC_RXM_DFX_STATUS14_MEM0_OFF)
#define HGC_RXM_DFX_STATUS14_MEM1_OFF 9
#define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \
				       HGC_RXM_DFX_STATUS14_MEM1_OFF)
#define HGC_RXM_DFX_STATUS14_MEM2_OFF 18
#define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \
				       HGC_RXM_DFX_STATUS14_MEM2_OFF)
#define HGC_RXM_DFX_STATUS15 0xaec
#define HGC_RXM_DFX_STATUS15_MEM3_OFF 0
#define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \
				       HGC_RXM_DFX_STATUS15_MEM3_OFF)
#define AWQOS_AWCACHE_CFG 0xc84
#define ARQOS_ARCACHE_CFG 0xc88
#define HILINK_ERR_DFX 0xe04
#define SAS_GPIO_CFG_0 0x1000
#define SAS_GPIO_CFG_1 0x1004
#define SAS_GPIO_TX_0_1 0x1040
#define SAS_CFG_DRIVE_VLD 0x1070

/*
 * Phy (per-port) registers requiring init.  These are addressed relative
 * to PORT_BASE; the per-phy accessors below add a 0x400 stride per phy.
 */
#define PORT_BASE (0x2000)
#define PHY_CFG (PORT_BASE + 0x0)
#define HARD_PHY_LINKRATE (PORT_BASE + 0x4)
#define PHY_CFG_ENA_OFF 0
#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
#define PHY_CFG_DC_OPT_OFF 2
#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
#define PHY_CFG_PHY_RST_OFF 3
#define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF)
#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
#define CFG_PROG_PHY_LINK_RATE_OFF 0
#define CFG_PROG_PHY_LINK_RATE_MSK (0xff << CFG_PROG_PHY_LINK_RATE_OFF)
#define CFG_PROG_OOB_PHY_LINK_RATE_OFF 8
#define CFG_PROG_OOB_PHY_LINK_RATE_MSK (0xf << CFG_PROG_OOB_PHY_LINK_RATE_OFF)
#define PHY_CTRL (PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF 0
#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
#define CMD_HDR_PIR_OFF 8
#define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF)
#define SERDES_CFG (PORT_BASE + 0x1c)
#define CFG_ALOS_CHK_DISABLE_OFF 9
#define CFG_ALOS_CHK_DISABLE_MSK (0x1 << CFG_ALOS_CHK_DISABLE_OFF)
#define SAS_PHY_BIST_CTRL (PORT_BASE + 0x2c)
#define CFG_BIST_MODE_SEL_OFF 0
#define CFG_BIST_MODE_SEL_MSK (0xf << CFG_BIST_MODE_SEL_OFF)
#define CFG_LOOP_TEST_MODE_OFF 14
#define CFG_LOOP_TEST_MODE_MSK (0x3 << CFG_LOOP_TEST_MODE_OFF)
#define CFG_RX_BIST_EN_OFF 16
#define CFG_RX_BIST_EN_MSK (0x1 << CFG_RX_BIST_EN_OFF)
#define CFG_TX_BIST_EN_OFF 17
#define CFG_TX_BIST_EN_MSK (0x1 << CFG_TX_BIST_EN_OFF)
#define CFG_BIST_TEST_OFF 18
#define CFG_BIST_TEST_MSK (0x1 << CFG_BIST_TEST_OFF)
#define SAS_PHY_BIST_CODE (PORT_BASE + 0x30)
#define SAS_PHY_BIST_CODE1 (PORT_BASE + 0x34)
#define SAS_BIST_ERR_CNT (PORT_BASE + 0x38)
#define SL_CFG (PORT_BASE + 0x84)
#define AIP_LIMIT (PORT_BASE + 0x90)
#define SL_CONTROL (PORT_BASE + 0x94)
#define SL_CONTROL_NOTIFY_EN_OFF 0
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CTA_OFF 17
#define SL_CTA_MSK (0x1 << SL_CTA_OFF)
#define RX_PRIMS_STATUS (PORT_BASE + 0x98)
#define RX_BCAST_CHG_OFF 1
#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
#define TX_ID_DWORD3 (PORT_BASE + 0xa8)
#define TX_ID_DWORD4 (PORT_BASE + 0xac)
#define TX_ID_DWORD5 (PORT_BASE + 0xb0)
#define TX_ID_DWORD6 (PORT_BASE + 0xb4)
#define TXID_AUTO (PORT_BASE + 0xb8)
#define CT3_OFF 1
#define CT3_MSK (0x1 << CT3_OFF)
#define TX_HARDRST_OFF 2
#define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF)
#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
#define STP_LINK_TIMER (PORT_BASE + 0x120)
#define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124)
#define CON_CFG_DRIVER (PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
#define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c)
#define CHL_INT0 (PORT_BASE + 0x1b4)
#define CHL_INT0_HOTPLUG_TOUT_OFF 0
#define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
#define CHL_INT0_SL_RX_BCST_ACK_OFF 1
#define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
#define CHL_INT0_SL_PHY_ENABLE_OFF 2
#define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
#define CHL_INT0_NOT_RDY_OFF 4
#define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF)
#define CHL_INT0_PHY_RDY_OFF 5
#define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF)
#define CHL_INT1 (PORT_BASE + 0x1b8)
#define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF 15
#define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF 16
#define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF 17
#define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF 18
#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT1_DMAC_TX_FIFO_ERR_OFF 23
#define CHL_INT1_DMAC_RX_FIFO_ERR_OFF 24
#define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF 26
#define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF 27
#define CHL_INT2 (PORT_BASE + 0x1bc)
#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
#define CHL_INT2_RX_DISP_ERR_OFF 28
#define CHL_INT2_RX_CODE_ERR_OFF 29
#define CHL_INT2_RX_INVLD_DW_OFF 30
#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4) 285 #define CHL_INT2_MSK (PORT_BASE + 0x1c8) 286 #define SAS_EC_INT_COAL_TIME (PORT_BASE + 0x1cc) 287 #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) 288 #define SAS_RX_TRAIN_TIMER (PORT_BASE + 0x2a4) 289 #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) 290 #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) 291 #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) 292 #define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc) 293 #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) 294 #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) 295 #define DMA_TX_STATUS (PORT_BASE + 0x2d0) 296 #define DMA_TX_STATUS_BUSY_OFF 0 297 #define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) 298 #define DMA_RX_STATUS (PORT_BASE + 0x2e8) 299 #define DMA_RX_STATUS_BUSY_OFF 0 300 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) 301 302 #define COARSETUNE_TIME (PORT_BASE + 0x304) 303 #define TXDEEMPH_G1 (PORT_BASE + 0x350) 304 #define ERR_CNT_DWS_LOST (PORT_BASE + 0x380) 305 #define ERR_CNT_RESET_PROB (PORT_BASE + 0x384) 306 #define ERR_CNT_INVLD_DW (PORT_BASE + 0x390) 307 #define ERR_CNT_CODE_ERR (PORT_BASE + 0x394) 308 #define ERR_CNT_DISP_ERR (PORT_BASE + 0x398) 309 #define DFX_FIFO_CTRL (PORT_BASE + 0x3a0) 310 #define DFX_FIFO_CTRL_TRIGGER_MODE_OFF 0 311 #define DFX_FIFO_CTRL_TRIGGER_MODE_MSK (0x7 << DFX_FIFO_CTRL_TRIGGER_MODE_OFF) 312 #define DFX_FIFO_CTRL_DUMP_MODE_OFF 3 313 #define DFX_FIFO_CTRL_DUMP_MODE_MSK (0x7 << DFX_FIFO_CTRL_DUMP_MODE_OFF) 314 #define DFX_FIFO_CTRL_SIGNAL_SEL_OFF 6 315 #define DFX_FIFO_CTRL_SIGNAL_SEL_MSK (0xF << DFX_FIFO_CTRL_SIGNAL_SEL_OFF) 316 #define DFX_FIFO_CTRL_DUMP_DISABLE_OFF 10 317 #define DFX_FIFO_CTRL_DUMP_DISABLE_MSK (0x1 << DFX_FIFO_CTRL_DUMP_DISABLE_OFF) 318 #define DFX_FIFO_TRIGGER (PORT_BASE + 0x3a4) 319 #define DFX_FIFO_TRIGGER_MSK (PORT_BASE + 0x3a8) 320 #define DFX_FIFO_DUMP_MSK (PORT_BASE + 0x3aC) 321 #define DFX_FIFO_RD_DATA (PORT_BASE + 0x3b0) 322 323 #define DEFAULT_ITCT_HW 2048 /* reset value, not 
reprogrammed */ 324 #if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW) 325 #error Max ITCT exceeded 326 #endif 327 328 #define AXI_MASTER_CFG_BASE (0x5000) 329 #define AM_CTRL_GLOBAL (0x0) 330 #define AM_CTRL_SHUTDOWN_REQ_OFF 0 331 #define AM_CTRL_SHUTDOWN_REQ_MSK (0x1 << AM_CTRL_SHUTDOWN_REQ_OFF) 332 #define AM_CURR_TRANS_RETURN (0x150) 333 334 #define AM_CFG_MAX_TRANS (0x5010) 335 #define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014) 336 #define AXI_CFG (0x5100) 337 #define AM_ROB_ECC_ERR_ADDR (0x510c) 338 #define AM_ROB_ECC_ERR_ADDR_OFF 0 339 #define AM_ROB_ECC_ERR_ADDR_MSK 0xffffffff 340 341 /* RAS registers need init */ 342 #define RAS_BASE (0x6000) 343 #define SAS_RAS_INTR0 (RAS_BASE) 344 #define SAS_RAS_INTR1 (RAS_BASE + 0x04) 345 #define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08) 346 #define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c) 347 #define CFG_SAS_RAS_INTR_MASK (RAS_BASE + 0x1c) 348 #define SAS_RAS_INTR2 (RAS_BASE + 0x20) 349 #define SAS_RAS_INTR2_MASK (RAS_BASE + 0x24) 350 351 /* HW dma structures */ 352 /* Delivery queue header */ 353 /* dw0 */ 354 #define CMD_HDR_ABORT_FLAG_OFF 0 355 #define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF) 356 #define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2 357 #define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF) 358 #define CMD_HDR_RESP_REPORT_OFF 5 359 #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) 360 #define CMD_HDR_TLR_CTRL_OFF 6 361 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) 362 #define CMD_HDR_PHY_ID_OFF 8 363 #define CMD_HDR_PHY_ID_MSK (0x1ff << CMD_HDR_PHY_ID_OFF) 364 #define CMD_HDR_FORCE_PHY_OFF 17 365 #define CMD_HDR_FORCE_PHY_MSK (0x1U << CMD_HDR_FORCE_PHY_OFF) 366 #define CMD_HDR_PORT_OFF 18 367 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) 368 #define CMD_HDR_PRIORITY_OFF 27 369 #define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF) 370 #define CMD_HDR_CMD_OFF 29 371 #define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF) 372 /* dw1 */ 373 #define CMD_HDR_UNCON_CMD_OFF 
3 374 #define CMD_HDR_DIR_OFF 5 375 #define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF) 376 #define CMD_HDR_RESET_OFF 7 377 #define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF) 378 #define CMD_HDR_VDTL_OFF 10 379 #define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF) 380 #define CMD_HDR_FRAME_TYPE_OFF 11 381 #define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF) 382 #define CMD_HDR_DEV_ID_OFF 16 383 #define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF) 384 /* dw2 */ 385 #define CMD_HDR_CFL_OFF 0 386 #define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF) 387 #define CMD_HDR_NCQ_TAG_OFF 10 388 #define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF) 389 #define CMD_HDR_MRFL_OFF 15 390 #define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF) 391 #define CMD_HDR_SG_MOD_OFF 24 392 #define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF) 393 /* dw3 */ 394 #define CMD_HDR_IPTT_OFF 0 395 #define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF) 396 /* dw6 */ 397 #define CMD_HDR_DIF_SGL_LEN_OFF 0 398 #define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF) 399 #define CMD_HDR_DATA_SGL_LEN_OFF 16 400 #define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF) 401 /* dw7 */ 402 #define CMD_HDR_ADDR_MODE_SEL_OFF 15 403 #define CMD_HDR_ADDR_MODE_SEL_MSK (1 << CMD_HDR_ADDR_MODE_SEL_OFF) 404 #define CMD_HDR_ABORT_IPTT_OFF 16 405 #define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF) 406 407 /* Completion header */ 408 /* dw0 */ 409 #define CMPLT_HDR_CMPLT_OFF 0 410 #define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF) 411 #define CMPLT_HDR_ERROR_PHASE_OFF 2 412 #define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF) 413 /* bit[9:2] Error Phase */ 414 #define ERR_PHASE_RESPONSE_FRAME_REV_STAGE_OFF \ 415 8 416 #define ERR_PHASE_RESPONSE_FRAME_REV_STAGE_MSK \ 417 (0x1 << ERR_PHASE_RESPONSE_FRAME_REV_STAGE_OFF) 418 #define CMPLT_HDR_RSPNS_XFRD_OFF 10 419 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) 420 #define 
CMPLT_HDR_RSPNS_GOOD_OFF 11 421 #define CMPLT_HDR_RSPNS_GOOD_MSK (0x1 << CMPLT_HDR_RSPNS_GOOD_OFF) 422 #define CMPLT_HDR_ERX_OFF 12 423 #define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF) 424 #define CMPLT_HDR_ABORT_STAT_OFF 13 425 #define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF) 426 /* abort_stat */ 427 #define STAT_IO_NOT_VALID 0x1 428 #define STAT_IO_NO_DEVICE 0x2 429 #define STAT_IO_COMPLETE 0x3 430 #define STAT_IO_ABORTED 0x4 431 /* dw1 */ 432 #define CMPLT_HDR_IPTT_OFF 0 433 #define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) 434 #define CMPLT_HDR_DEV_ID_OFF 16 435 #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF) 436 /* dw3 */ 437 #define SATA_DISK_IN_ERROR_STATUS_OFF 8 438 #define SATA_DISK_IN_ERROR_STATUS_MSK (0x1 << SATA_DISK_IN_ERROR_STATUS_OFF) 439 #define CMPLT_HDR_SATA_DISK_ERR_OFF 16 440 #define CMPLT_HDR_SATA_DISK_ERR_MSK (0x1 << CMPLT_HDR_SATA_DISK_ERR_OFF) 441 #define CMPLT_HDR_IO_IN_TARGET_OFF 17 442 #define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF) 443 /* bit[23:18] ERR_FIS_ATA_STATUS */ 444 #define FIS_ATA_STATUS_ERR_OFF 18 445 #define FIS_ATA_STATUS_ERR_MSK (0x1 << FIS_ATA_STATUS_ERR_OFF) 446 #define FIS_TYPE_SDB_OFF 31 447 #define FIS_TYPE_SDB_MSK (0x1 << FIS_TYPE_SDB_OFF) 448 449 /* ITCT header */ 450 /* qw0 */ 451 #define ITCT_HDR_DEV_TYPE_OFF 0 452 #define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF) 453 #define ITCT_HDR_VALID_OFF 2 454 #define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF) 455 #define ITCT_HDR_MCR_OFF 5 456 #define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF) 457 #define ITCT_HDR_VLN_OFF 9 458 #define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF) 459 #define ITCT_HDR_SMP_TIMEOUT_OFF 16 460 #define ITCT_HDR_AWT_CONTINUE_OFF 25 461 #define ITCT_HDR_PORT_ID_OFF 28 462 #define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF) 463 /* qw2 */ 464 #define ITCT_HDR_INLT_OFF 0 465 #define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF) 466 #define ITCT_HDR_RTOLT_OFF 
48 467 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) 468 469 /*debugfs*/ 470 #define TWO_PARA_PER_LINE 2 471 #define FOUR_PARA_PER_LINE 4 472 #define DUMP_BUF_SIZE 8 473 #define BIST_BUF_SIZE 16 474 475 struct hisi_sas_protect_iu_v3_hw { 476 u32 dw0; 477 u32 lbrtcv; 478 u32 lbrtgv; 479 u32 dw3; 480 u32 dw4; 481 u32 dw5; 482 u32 rsv; 483 }; 484 485 struct hisi_sas_complete_v3_hdr { 486 __le32 dw0; 487 __le32 dw1; 488 __le32 act; 489 __le32 dw3; 490 }; 491 492 struct hisi_sas_err_record_v3 { 493 /* dw0 */ 494 __le32 trans_tx_fail_type; 495 496 /* dw1 */ 497 __le32 trans_rx_fail_type; 498 499 /* dw2 */ 500 __le16 dma_tx_err_type; 501 __le16 sipc_rx_err_type; 502 503 /* dw3 */ 504 __le32 dma_rx_err_type; 505 }; 506 507 #define RX_DATA_LEN_UNDERFLOW_OFF 6 508 #define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF) 509 510 #define RX_FIS_STATUS_ERR_OFF 0 511 #define RX_FIS_STATUS_ERR_MSK (1 << RX_FIS_STATUS_ERR_OFF) 512 513 #define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096 514 #define HISI_SAS_MSI_COUNT_V3_HW 32 515 516 #define DIR_NO_DATA 0 517 #define DIR_TO_INI 1 518 #define DIR_TO_DEVICE 2 519 #define DIR_RESERVED 3 520 521 #define FIS_CMD_IS_UNCONSTRAINED(fis) \ 522 ((fis.command == ATA_CMD_READ_LOG_EXT) || \ 523 (fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \ 524 ((fis.command == ATA_CMD_DEV_RESET) && \ 525 ((fis.control & ATA_SRST) != 0))) 526 527 #define T10_INSRT_EN_OFF 0 528 #define T10_INSRT_EN_MSK (1 << T10_INSRT_EN_OFF) 529 #define T10_RMV_EN_OFF 1 530 #define T10_RMV_EN_MSK (1 << T10_RMV_EN_OFF) 531 #define T10_RPLC_EN_OFF 2 532 #define T10_RPLC_EN_MSK (1 << T10_RPLC_EN_OFF) 533 #define T10_CHK_EN_OFF 3 534 #define T10_CHK_EN_MSK (1 << T10_CHK_EN_OFF) 535 #define INCR_LBRT_OFF 5 536 #define INCR_LBRT_MSK (1 << INCR_LBRT_OFF) 537 #define USR_DATA_BLOCK_SZ_OFF 20 538 #define USR_DATA_BLOCK_SZ_MSK (0x3 << USR_DATA_BLOCK_SZ_OFF) 539 #define T10_CHK_MSK_OFF 16 540 #define T10_CHK_REF_TAG_MSK (0xf0 << T10_CHK_MSK_OFF) 541 #define 
T10_CHK_APP_TAG_MSK (0xc << T10_CHK_MSK_OFF) 542 543 #define BASE_VECTORS_V3_HW 16 544 #define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1) 545 #define IRQ_PHY_UP_DOWN_INDEX 1 546 #define IRQ_CHL_INDEX 2 547 #define IRQ_AXI_INDEX 11 548 549 #define DELAY_FOR_RESET_HW 100 550 #define HDR_SG_MOD 0x2 551 #define LUN_SIZE 8 552 #define ATTR_PRIO_REGION 9 553 #define CDB_REGION 12 554 #define PRIO_OFF 3 555 #define TMF_REGION 10 556 #define TAG_MSB 12 557 #define TAG_LSB 13 558 #define SMP_FRAME_TYPE 2 559 #define SMP_CRC_SIZE 4 560 #define HDR_TAG_OFF 3 561 #define HOST_NO_OFF 6 562 #define PHY_NO_OFF 7 563 #define IDENTIFY_REG_READ 6 564 #define LINK_RESET_TIMEOUT_OFF 4 565 #define DECIMALISM_FLAG 10 566 #define WAIT_RETRY 100 567 #define WAIT_TMROUT 5000 568 569 #define ID_DWORD0_INDEX 0 570 #define ID_DWORD1_INDEX 1 571 #define ID_DWORD2_INDEX 2 572 #define ID_DWORD3_INDEX 3 573 #define ID_DWORD4_INDEX 4 574 #define ID_DWORD5_INDEX 5 575 #define TICKS_BIT_INDEX 24 576 #define COUNT_BIT_INDEX 8 577 578 #define PORT_REG_LENGTH 0x100 579 #define GLOBAL_REG_LENGTH 0x800 580 #define AXI_REG_LENGTH 0x61 581 #define RAS_REG_LENGTH 0x10 582 583 #define CHNL_INT_STS_MSK 0xeeeeeeee 584 #define CHNL_INT_STS_PHY_MSK 0xe 585 #define CHNL_INT_STS_INT0_MSK BIT(1) 586 #define CHNL_INT_STS_INT1_MSK BIT(2) 587 #define CHNL_INT_STS_INT2_MSK BIT(3) 588 #define CHNL_WIDTH 4 589 590 #define BAR_NO_V3_HW 5 591 592 enum { 593 DSM_FUNC_ERR_HANDLE_MSI = 0, 594 }; 595 596 static bool hisi_sas_intr_conv; 597 MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)"); 598 599 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */ 600 static int prot_mask; 601 module_param(prot_mask, int, 0444); 602 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 "); 603 604 /* the index of iopoll queues are bigger than interrupt queues' */ 605 static int experimental_iopoll_q_cnt; 606 module_param(experimental_iopoll_q_cnt, int, 0444); 607 
MODULE_PARM_DESC(experimental_iopoll_q_cnt, "number of queues to be used as poll mode, def=0"); 608 609 static int debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba); 610 611 static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) 612 { 613 void __iomem *regs = hisi_hba->regs + off; 614 615 return readl(regs); 616 } 617 618 static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) 619 { 620 void __iomem *regs = hisi_hba->regs + off; 621 622 writel(val, regs); 623 } 624 625 static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, 626 u32 off, u32 val) 627 { 628 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; 629 630 writel(val, regs); 631 } 632 633 static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, 634 int phy_no, u32 off) 635 { 636 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; 637 638 return readl(regs); 639 } 640 641 #define hisi_sas_read32_poll_timeout(off, val, cond, delay_us, \ 642 timeout_us) \ 643 ({ \ 644 void __iomem *regs = hisi_hba->regs + off; \ 645 readl_poll_timeout(regs, val, cond, delay_us, timeout_us); \ 646 }) 647 648 #define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us, \ 649 timeout_us) \ 650 ({ \ 651 void __iomem *regs = hisi_hba->regs + off; \ 652 readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\ 653 }) 654 655 static void interrupt_enable_v3_hw(struct hisi_hba *hisi_hba) 656 { 657 int i; 658 659 for (i = 0; i < hisi_hba->queue_count; i++) 660 hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0); 661 662 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe); 663 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe); 664 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff); 665 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555); 666 667 for (i = 0; i < hisi_hba->n_phy; i++) { 668 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff); 669 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe); 670 
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); 671 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0); 672 hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); 673 } 674 } 675 676 static void init_reg_v3_hw(struct hisi_hba *hisi_hba) 677 { 678 struct pci_dev *pdev = hisi_hba->pci_dev; 679 int i, j; 680 681 /* Global registers init */ 682 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 683 (u32)((1ULL << hisi_hba->queue_count) - 1)); 684 hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400); 685 /* time / CLK_AHB = 2.5s / 2ns = 0x4A817C80 */ 686 hisi_sas_write32(hisi_hba, TRANS_LOCK_ICT_TIME, 0x4A817C80); 687 hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); 688 hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); 689 hisi_sas_write32(hisi_hba, CFG_ICT_TIMER_STEP_TRSH, 0xf4240); 690 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); 691 /* configure the interrupt coalescing timeout period 10us */ 692 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0xa); 693 /* configure the count of CQ entries 10 */ 694 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0xa); 695 hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN, 696 hisi_sas_intr_conv); 697 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff); 698 hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); 699 hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); 700 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); 701 hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0); 702 hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0); 703 hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0); 704 hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0); 705 hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0); 706 hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); 707 708 if (pdev->revision < 0x30) 709 hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0); 710 711 interrupt_enable_v3_hw(hisi_hba); 712 for (i = 0; i < hisi_hba->n_phy; i++) { 713 enum sas_linkrate max; 714 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 715 
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, i,
					 PROG_PHY_LINK_RATE);

		/*
		 * Keep the non-rate bits, then program the max negotiable
		 * link rate; default to 12G when no sysfs limit is set.
		 */
		prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
		if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
				      SAS_LINK_RATE_1_5_GBPS))
			max = SAS_LINK_RATE_12_0_GBPS;
		else
			max = sas_phy->phy->maximum_linkrate;
		prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
				     prog_phy_link_rate);
		hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
		/* write-1-to-clear stale per-channel interrupt status */
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7ffffff);
		hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
		hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME,
				     0x30f4240);
		hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);

		/* set value through firmware for 920B and later version */
		if (pdev->revision < 0x30) {
			hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
			hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
			/* used for 12G negotiate */
			hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
		}

		/* get default FFE configuration for BIST */
		for (j = 0; j < FFE_CFG_MAX; j++) {
			u32 val = hisi_sas_phy_read32(hisi_hba, i,
						      TXDEEMPH_G1 + (j * 0x4));

			hisi_hba->debugfs_bist_ffe[i][j] = val;
		}
	}

	/* Each queue's register group is 0x14 bytes wide, hence i * 0x14. */
	for (i = 0; i < hisi_hba->queue_count; i++) {
		/* Delivery queue */
		hisi_sas_write32(hisi_hba,
				 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);

		/* Completion queue */
		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);
	}

	/* itct */
	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->itct_dma));

	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->itct_dma));

	/* iost */
	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->iost_dma));

	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->iost_dma));

	/* breakpoint */
	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->breakpoint_dma));

	/* SATA broken msg */
	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->sata_breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->sata_breakpoint_dma));

	/* SATA initial fis */
	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
			 lower_32_bits(hisi_hba->initial_fis_dma));

	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
			 upper_32_bits(hisi_hba->initial_fis_dma));

	/* RAS registers init */
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0);
	hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0);

	/* LED registers init */
	hisi_sas_write32(hisi_hba, SAS_CFG_DRIVE_VLD, 0x80000ff);
	hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1, 0x80808080);
	hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + 0x4, 0x80808080);
	/* Configure blink generator rate A to 1Hz and B to 4Hz */
	hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_1, 0x121700);
	hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_0, 0x800000);
}

/* Enable the "DC optimal" open-connection mode on one phy. */
static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg &= ~PHY_CFG_DC_OPT_MSK;
	cfg |= 1 << PHY_CFG_DC_OPT_OFF;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}

/*
 * Build the IDENTIFY address frame this phy transmits during link
 * bring-up and load it into the TX_ID_DWORD0..5 registers.  The frame
 * is byte-swapped per dword because the hardware expects big-endian
 * frame layout in little-endian registers.
 */
static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct sas_identify_frame identify_frame;
	u32 *identify_buffer;

	memset(&identify_frame, 0, sizeof(identify_frame));
	identify_frame.dev_type = SAS_END_DEVICE;
	identify_frame.frame_type = 0;
	identify_frame._un1 = 1;
	identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
	identify_frame.target_bits = SAS_PROTOCOL_NONE;
	memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
	memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
	identify_frame.phy_id = phy_no;
	identify_buffer = (u32 *)(&identify_frame);

	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
			     __swab32(identify_buffer[ID_DWORD0_INDEX]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
			     __swab32(identify_buffer[ID_DWORD1_INDEX]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
			     __swab32(identify_buffer[ID_DWORD2_INDEX]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
			     __swab32(identify_buffer[ID_DWORD3_INDEX]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
			     __swab32(identify_buffer[ID_DWORD4_INDEX]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
			     __swab32(identify_buffer[ID_DWORD5_INDEX]));
}

/*
 * Fill the device's ITCT (I-T context table) entry in host memory.
 * The hardware reads this entry by device_id when issuing commands.
 */
static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
			     struct hisi_sas_device *sas_dev)
{
	struct domain_device *device = sas_dev->sas_device;
	struct device *dev = hisi_hba->dev;
	u64 qw0, device_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
	struct domain_device *parent_dev = device->parent;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	u64 sas_addr;

	memset(itct, 0, sizeof(*itct));

	/* qw0 */
	qw0 = 0;
	switch (sas_dev->dev_type) {
	case SAS_END_DEVICE:
	case SAS_EDGE_EXPANDER_DEVICE:
	case SAS_FANOUT_EXPANDER_DEVICE:
		qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PENDING:
		/* SATA behind an expander is driven via STP */
		if (parent_dev && dev_is_expander(parent_dev->dev_type))
			qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
		else
			qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
		break;
	default:
		dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
			 sas_dev->dev_type);
	}

	qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
		(device->linkrate << ITCT_HDR_MCR_OFF) |
		(1 << ITCT_HDR_VLN_OFF) |
		(0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) |
		(1 << ITCT_HDR_AWT_CONTINUE_OFF) |
		(port->id << ITCT_HDR_PORT_ID_OFF));
	itct->qw0 = cpu_to_le64(qw0);

	/* qw1 */
	memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
	itct->sas_addr = cpu_to_le64(__swab64(sas_addr));

	/* qw2 */
	if (!dev_is_sata(device))
		itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
					(0x1ULL << ITCT_HDR_RTOLT_OFF));
}

/*
 * Ask the hardware to invalidate a device's ITCT entry and wait for
 * the completion raised by the ITCT-clear interrupt.
 *
 * Return: 0 on success, -ETIMEDOUT if the hardware never signals
 * completion (the host-memory entry is then left untouched).
 */
static int clear_itct_v3_hw(struct hisi_hba *hisi_hba,
			    struct hisi_sas_device *sas_dev)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	u64 dev_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
	u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
	struct device *dev = hisi_hba->dev;

	sas_dev->completion = &completion;

	/* clear the itct interrupt state */
	if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
				 ENT_INT_SRC3_ITC_INT_MSK);

	/* clear the itct table */
	reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
	hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);

	if (!wait_for_completion_timeout(sas_dev->completion,
					 HISI_SAS_CLEAR_ITCT_TIMEOUT)) {
		dev_warn(dev, "failed to clear ITCT\n");
		return -ETIMEDOUT;
	}

	memset(itct, 0, sizeof(struct hisi_sas_itct));
	return 0;
}

/*
 * Mark every outstanding slot of @device as aborted in the hardware,
 * one IPTT at a time, then latch the whole set with the DONE strobe.
 * sas_dev->lock protects the slot list during iteration.
 */
static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
			       struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	u32 cfg_abt_set_query_iptt;

	cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba,
		CFG_ABT_SET_QUERY_IPTT);
	spin_lock(&sas_dev->lock);
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) {
		cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK;
		cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) |
			(slot->idx << CFG_SET_ABORTED_IPTT_OFF);
		hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
			cfg_abt_set_query_iptt);
	}
	spin_unlock(&sas_dev->lock);
	cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF);
	hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
		cfg_abt_set_query_iptt);
	hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE,
		1 << CFG_ABT_SET_IPTT_DONE_OFF);
}

/*
 * Quiesce the controller (queues off, phys down, AXI bus idle) and
 * reset it through the ACPI _RST method.
 *
 * Return: 0 on success, -EIO on AXI/ACPI failure, -EINVAL when the
 * platform provides no ACPI handle (no reset method available).
 */
static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int ret;
	u32 val;

	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);

	/* Disable all of the PHYs */
	hisi_sas_stop_phys(hisi_hba);
	udelay(HISI_SAS_DELAY_FOR_PHY_DISABLE);

	/* Ensure axi bus idle */
	ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
					   20000, 1000000);
	if (ret) {
		dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
		return -EIO;
	}

	if (ACPI_HANDLE(dev)) {
		acpi_status s;

		s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
		if (ACPI_FAILURE(s)) {
			dev_err(dev, "Reset failed\n");
			return -EIO;
		}
	} else {
		dev_err(dev, "no reset method!\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * Full hardware bring-up: reset, reprogram registers, then let the
 * platform firmware apply its configuration via an ACPI _DSM call.
 * A missing DSM method is tolerated (warn and continue).
 *
 * Return: 0 on success or a negative errno from the reset path.
 */
static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct acpi_device *acpi_dev;
	union acpi_object *obj;
	guid_t guid;
	int rc;

	rc = reset_hw_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
		return rc;
	}

	msleep(DELAY_FOR_RESET_HW);
	init_reg_v3_hw(hisi_hba);

	if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) {
		dev_err(dev, "Parse GUID failed\n");
		return -EINVAL;
	}

	/*
	 * This DSM handles some hardware-related configurations:
	 * 1. Switch over to MSI error handling in kernel
	 * 2. BIOS *may* reset some register values through this method
	 */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
				DSM_FUNC_ERR_HANDLE_MSI, NULL);
	if (!obj)
		dev_warn(dev, "can not find DSM method, ignore\n");
	else
		ACPI_FREE(obj);

	acpi_dev = ACPI_COMPANION(dev);
	if (!acpi_device_power_manageable(acpi_dev))
		dev_notice(dev, "neither _PS0 nor _PR0 is defined\n");
	return 0;
}

/* Set the enable bit and release the phy-reset bit on one phy. */
static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg |= PHY_CFG_ENA_MSK;
	cfg &= ~PHY_CFG_PHY_RST_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}

/*
 * Disable one phy.  The RX_DISP/RX_CODE/RX_INVLD_DW channel interrupts
 * are temporarily masked, the per-phy error counters are read (read-to-
 * clear sequence) and the masked status bits cleared before the old
 * interrupt mask is restored.  If PHY_STATE still reports the phy as up
 * after the disable, a phy reset is additionally requested.
 */
static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
	static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
			       BIT(CHL_INT2_RX_CODE_ERR_OFF) |
			       BIT(CHL_INT2_RX_INVLD_DW_OFF);
	u32 state;

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, msk | irq_msk);

	cfg &= ~PHY_CFG_ENA_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);

	mdelay(HISI_SAS_DELAY_FOR_PHY_DISABLE);

	state = hisi_sas_read32(hisi_hba, PHY_STATE);
	if (state & BIT(phy_no)) {
		cfg |= PHY_CFG_PHY_RST_MSK;
		hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
	}

	udelay(1);

	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, msk);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, irq_msk);
}

/* Program the ID frame and optimal-connection mode, then enable the phy. */
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	config_id_frame_v3_hw(hisi_hba, phy_no);
	config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
	enable_phy_v3_hw(hisi_hba, phy_no);
}

/*
 * Hard-reset one phy: disable it, and for end devices additionally set
 * TX_HARDRST so the next bring-up transmits a HARD_RESET primitive,
 * then re-enable after a settle delay.
 */
static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 txid_auto;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	if (phy->identify.device_type == SAS_END_DEVICE) {
		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
					txid_auto | TX_HARDRST_MSK);
	}
	msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}

/* v3 hardware supports up to 12G links. */
static enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
{
	return SAS_LINK_RATE_12_0_GBPS;
}

/* Enable every phy that is administratively enabled in sysfs. */
static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;

		if (!sas_phy->phy->enabled)
			continue;

		hisi_sas_phy_enable(hisi_hba, i, 1);
	}
}

/* Pulse SL_CONTROL's NOTIFY_EN bit to send NOTIFY (ENABLE SPINUP). */
static void sl_notify_ssp_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 sl_control;

	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
	msleep(1);
	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}

/*
 * Build a bitmap of the phys attached to @port_id: a phy is included
 * when it is up (PHY_STATE bit set) and its 4-bit port field in
 * PHY_PORT_NUM_MA matches.
 */
static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
{
	int i, bitmap = 0;
	u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);

	for (i = 0; i < hisi_hba->n_phy; i++)
		if (phy_state & BIT(i))
			if (((phy_port_num_ma >> (i * HISI_SAS_REG_MEM_SIZE)) & 0xf) ==
			    port_id)
				bitmap |= BIT(i);

	return bitmap;
}

/*
 * Kick the delivery queue: pop every ready slot off dq->list and write
 * the write pointer once, past the last ready slot.
 */
static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	struct hisi_sas_slot *s, *s1, *s2 = NULL;
	int dlvry_queue = dq->id;
	int wp;

	list_for_each_entry_safe(s, s1, &dq->list, delivery) {
		if (!s->ready)
			break;
		s2 = s;
		list_del(&s->delivery);
	}

	if (!s2)
		return;

	/*
	 * Ensure that memories for slots built on other CPUs is observed.
	 */
	smp_rmb();
	wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;

	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}

/*
 * Translate the mapped scatterlist into the slot's PRD SGE page and
 * record the table address and element count in the command header.
 */
static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
			       struct hisi_sas_slot *slot,
			       struct hisi_sas_cmd_hdr *hdr,
			       struct scatterlist *scatter,
			       int n_elem)
{
	struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
	struct scatterlist *sg;
	int i;

	for_each_sg(scatter, sg, n_elem, i) {
		struct hisi_sas_sge *entry = &sge_page->sge[i];

		entry->addr = cpu_to_le64(sg_dma_address(sg));
		entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
		entry->data_len = cpu_to_le32(sg_dma_len(sg));
		entry->data_off = 0;
	}

	hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));

	hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
}

/*
 * Same as prep_prd_sge_v3_hw() but for the DIF (protection info)
 * scatterlist, filling the separate DIF SGE page and header fields.
 */
static void prep_prd_sge_dif_v3_hw(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot,
				   struct hisi_sas_cmd_hdr *hdr,
				   struct scatterlist *scatter,
				   int n_elem)
{
	struct hisi_sas_sge_dif_page *sge_dif_page;
	struct scatterlist *sg;
	int i;

	sge_dif_page = hisi_sas_sge_dif_addr_mem(slot);

	for_each_sg(scatter, sg, n_elem, i) {
		struct hisi_sas_sge *entry = &sge_dif_page->sge[i];

		entry->addr = cpu_to_le64(sg_dma_address(sg));
		entry->page_ctrl_0 = 0;
		entry->page_ctrl_1 = 0;
		entry->data_len = cpu_to_le32(sg_dma_len(sg));
		entry->data_off = 0;
	}

	hdr->dif_prd_table_addr =
		cpu_to_le64(hisi_sas_sge_dif_addr_dma(slot));

	hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DIF_SGL_LEN_OFF);
}

/*
 * Select the T10 PI tag-check mask for a command.
 * NOTE(review): the returned bits appear inverted relative to the flag
 * tested (REF_CHECK set -> only APP_TAG mask) — presumably these MSK
 * bits tell the hardware which checks to skip; confirm against the
 * hardware manual before changing.
 */
static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd)
{
	unsigned char prot_flags = scsi_cmnd->prot_flags;

	if (prot_flags & SCSI_PROT_REF_CHECK)
		return T10_CHK_APP_TAG_MSK;
	return T10_CHK_REF_TAG_MSK | T10_CHK_APP_TAG_MSK;
}

/*
 * Populate the protection information IU from the command's T10 PI
 * operation: INSERT generates PI, STRIP removes it, PASS checks it
 * end-to-end.  The reference tag seeds lbrtgv/lbrtcv accordingly.
 */
static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
			    struct hisi_sas_protect_iu_v3_hw *prot)
{
	unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
	unsigned int interval = scsi_prot_interval(scsi_cmnd);
	u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmd_to_rq(scsi_cmnd));

	switch (prot_op) {
	case SCSI_PROT_READ_INSERT:
		prot->dw0 |= T10_INSRT_EN_MSK;
		prot->lbrtgv = lbrt_chk_val;
		break;
	case SCSI_PROT_READ_STRIP:
		prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
		prot->lbrtcv = lbrt_chk_val;
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	case SCSI_PROT_READ_PASS:
		prot->dw0 |= T10_CHK_EN_MSK;
		prot->lbrtcv = lbrt_chk_val;
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	case SCSI_PROT_WRITE_INSERT:
		prot->dw0 |= T10_INSRT_EN_MSK;
		prot->lbrtgv = lbrt_chk_val;
		break;
	case SCSI_PROT_WRITE_STRIP:
		prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
		prot->lbrtcv = lbrt_chk_val;
		break;
	case SCSI_PROT_WRITE_PASS:
		prot->dw0 |= T10_CHK_EN_MSK;
		prot->lbrtcv = lbrt_chk_val;
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	default:
		WARN(1, "prot_op(0x%x) is not valid\n", prot_op);
		break;
	}

	/* encode the protection interval (logical block size) */
	switch (interval) {
	case 512:
		break;
	case 4096:
		prot->dw0 |= (0x1 << USR_DATA_BLOCK_SZ_OFF);
		break;
	case 520:
		prot->dw0 |= (0x2 << USR_DATA_BLOCK_SZ_OFF);
		break;
	default:
		WARN(1, "protection interval (0x%x) invalid\n",
		     interval);
		break;
	}

	prot->dw0 |= INCR_LBRT_MSK;
}

/*
 * Build the delivery-queue command header and command table for an
 * SSP command or SSP task-management frame in @slot.  TMFs are sent
 * as high-priority, no-data frames; normal commands map the data and
 * (optionally) DIF scatterlists and may append a protection IU.
 */
static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port = slot->port;
	struct sas_ssp_task *ssp_task = &task->ssp_task;
	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
	struct sas_tmf_task *tmf = slot->tmf;
	int has_data = 0, priority = !!tmf;
	unsigned char prot_op;
	u8 *buf_cmd;
	u32 dw1 = 0, dw2 = 0, len = 0;

	hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
			       (2 << CMD_HDR_TLR_CTRL_OFF) |
			       (port->id << CMD_HDR_PORT_OFF) |
			       (priority << CMD_HDR_PRIORITY_OFF) |
			       (1 << CMD_HDR_CMD_OFF)); /* ssp */

	dw1 = 1 << CMD_HDR_VDTL_OFF;
	if (tmf) {
		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
	} else {
		/*
		 * prot_op is only assigned here; the later use is guarded
		 * by has_data, which stays 0 on the TMF path.
		 */
		prot_op = scsi_get_prot_op(scsi_cmnd);
		dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_TO_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
			break;
		case DMA_FROM_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
			break;
		default:
			dw1 &= ~CMD_HDR_DIR_MSK;
		}
	}

	/* map itct entry */
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

	dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) +
		 3) / BYTE_TO_DW) << CMD_HDR_CFL_OFF) |
	      ((HISI_SAS_MAX_SSP_RESP_SZ / BYTE_TO_DW) << CMD_HDR_MRFL_OFF) |
	      (HDR_SG_MOD << CMD_HDR_SG_MOD_OFF);
	hdr->dw2 = cpu_to_le32(dw2);
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
				   slot->n_elem);

		if (scsi_prot_sg_count(scsi_cmnd))
			prep_prd_sge_dif_v3_hw(hisi_hba, slot, hdr,
					       scsi_prot_sglist(scsi_cmnd),
					       slot->n_elem_dif);
	}

	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	/* the command IU follows the SSP frame header in the table */
	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
		sizeof(struct ssp_frame_hdr);

	memcpy(buf_cmd, &task->ssp_task.LUN, LUN_SIZE);
	if (!tmf) {
		buf_cmd[ATTR_PRIO_REGION] = ssp_task->task_attr;
		memcpy(buf_cmd + CDB_REGION, scsi_cmnd->cmnd,
		       scsi_cmnd->cmd_len);
	} else {
		buf_cmd[TMF_REGION] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			/* tag of the task being aborted/queried, big-endian */
			buf_cmd[TAG_MSB] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[TAG_LSB] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}

	if (has_data && (prot_op != SCSI_PROT_NORMAL)) {
		struct hisi_sas_protect_iu_v3_hw prot;
		u8 *buf_cmd_prot;

		hdr->dw7 |= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF);
		dw1 |= CMD_HDR_PIR_MSK;
		buf_cmd_prot = hisi_sas_cmd_hdr_addr_mem(slot) +
			       sizeof(struct ssp_frame_hdr) +
			       sizeof(struct ssp_command_iu);

		memset(&prot, 0, sizeof(struct hisi_sas_protect_iu_v3_hw));
		fill_prot_v3_hw(scsi_cmnd, &prot);
		memcpy(buf_cmd_prot, &prot,
		       sizeof(struct hisi_sas_protect_iu_v3_hw));
		/*
		 * For READ, we need length of info read to memory, while for
		 * WRITE we need length of data written to the disk.
		 */
		if (prot_op == SCSI_PROT_WRITE_INSERT ||
		    prot_op == SCSI_PROT_READ_INSERT ||
		    prot_op == SCSI_PROT_WRITE_PASS ||
		    prot_op == SCSI_PROT_READ_PASS) {
			unsigned int interval = scsi_prot_interval(scsi_cmnd);
			unsigned int ilog2_interval = ilog2(interval);

			len = (task->total_xfer_len >> ilog2_interval) *
			      BYTE_TO_DDW;
		}
	}

	hdr->dw1 = cpu_to_le32(dw1);

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len);
}

/*
 * Build the command header for an SMP request.  The request frame is
 * already in the single-entry smp_req scatterlist; the CRC dword is
 * excluded from the connection frame length (CFL).
 */
static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_port *port = slot->port;
	struct scatterlist *sg_req;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	dma_addr_t req_dma_addr;
	unsigned int req_len;
	u32 cfl;

	/* req */
	sg_req = &task->smp_task.smp_req;
	req_len = sg_dma_len(sg_req);
	req_dma_addr = sg_dma_address(sg_req);

	/* create header */
	/* dw0 */
	hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
			       (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
			       (SMP_FRAME_TYPE << CMD_HDR_CMD_OFF)); /* smp */

	/* map itct entry */
	hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
			       (1 << CMD_HDR_FRAME_TYPE_OFF) |
			       (DIR_NO_DATA << CMD_HDR_DIR_OFF));

	/* dw2 */
	cfl = (req_len - SMP_CRC_SIZE) / BYTE_TO_DW;
	hdr->dw2 = cpu_to_le32((cfl << CMD_HDR_CFL_OFF) |
			       (HISI_SAS_MAX_SMP_RESP_SZ / BYTE_TO_DW <<
			       CMD_HDR_MRFL_OFF));

	hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);

	hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
}

/*
 * Build the command header and command table (host-to-device FIS) for
 * a SATA/STP command in @slot.
 */
static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct domain_device *device = task->dev;
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	int phy_id;
	u8 *buf_cmd;
	int has_data = 0, hdr_tag = 0;
	u32 dw1 = 0, dw2 = 0;

	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		/* behind an expander: STP command (cmd type 3) */
		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
	} else {
		/* direct-attached: SATA (cmd type 4), forced to this phy */
		phy_id = device->phy->identify.phy_identifier;
		hdr->dw0 |= cpu_to_le32((1U << phy_id)
				<< CMD_HDR_PHY_ID_OFF);
		hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK;
		hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF);
	}

	switch (task->data_dir) {
	case DMA_TO_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
		break;
	case DMA_FROM_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
		break;
	default:
		dw1 &= ~CMD_HDR_DIR_MSK;
	}

	/* a soft-reset FIS sets the RESET flag in the header */
	if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
	    (task->ata_task.fis.control & ATA_SRST))
		dw1 |= 1 << CMD_HDR_RESET_OFF;

	dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF;
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

	if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
		dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;

	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2 */
	if (task->ata_task.use_ncq) {
		struct ata_queued_cmd *qc = task->uldd_task;

		/* NCQ tag lives both in the FIS sector count and dw2 */
		hdr_tag = qc->tag;
		task->ata_task.fis.sector_count |=
				(u8)(hdr_tag << HDR_TAG_OFF);
		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
	}

	dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / BYTE_TO_DW) << CMD_HDR_CFL_OFF |
			HDR_SG_MOD << CMD_HDR_SG_MOD_OFF;
	hdr->dw2 = cpu_to_le32(dw2);

	/* dw3 */
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data)
		prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
}

/*
 * Build the command header for an internal abort (cmd type 5): abort
 * either a single IPTT or a whole device, per abort->type.
 */
static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
			     struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct sas_internal_abort_task *abort = &task->abort_task;
	struct domain_device *dev = task->dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct hisi_sas_port *port = slot->port;
	struct hisi_sas_device *sas_dev = dev->lldd_dev;
	bool sata = dev_is_sata(dev);

	/* dw0 */
	hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /* abort */
			       (port->id << CMD_HDR_PORT_OFF) |
			       (sata << CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
			       (abort->type << CMD_HDR_ABORT_FLAG_OFF));

	/* dw1 */
	hdr->dw1 = cpu_to_le32(sas_dev->device_id
			<< CMD_HDR_DEV_ID_OFF);

	/* dw7 */
	hdr->dw7 = cpu_to_le32(abort->tag << CMD_HDR_ABORT_IPTT_OFF);
	hdr->transfer_tags = cpu_to_le32(slot->idx);
}

/*
 * Handle a phy-up channel interrupt: read the negotiated port id and
 * link rate, capture the received ID frame (SAS) or initial D2H FIS
 * (SATA), and notify libsas.  Runs in hard-irq context.
 */
static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int i;
	irqreturn_t res;
	u32 context, port_id, link_rate;
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = hisi_hba->dev;

	/* mask further phy-enable interrupts while we handle this one */
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
	/* each phy has a 4-bit field in these packed registers */
	port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	port_id = (port_id >> (HISI_SAS_REG_MEM_SIZE * phy_no)) & 0xf;
	link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
	link_rate = (link_rate >> (phy_no * HISI_SAS_REG_MEM_SIZE)) & 0xf;

	if (port_id == 0xf) {
		dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}
	sas_phy->linkrate = link_rate;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);

	/* Check for SATA dev */
	context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
	if (context & (1 << phy_no)) {
		struct hisi_sas_initial_fis *initial_fis;
		struct dev_to_host_fis *fis;
		u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
		struct Scsi_Host *shost = hisi_hba->shost;

		dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
		initial_fis = &hisi_hba->initial_fis[phy_no];
		fis = &initial_fis->fis;

		/* check ERR bit of Status Register */
		if (fis->status & ATA_ERR) {
			dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n",
				 phy_no, fis->status);
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
			res = IRQ_NONE;
			goto end;
		}

		/*
		 * SATA devices have no SAS address; synthesize one from
		 * the host number and phy number.
		 */
		sas_phy->oob_mode = SATA_OOB_MODE;
		attached_sas_addr[0] = 0x50;
		attached_sas_addr[HOST_NO_OFF] = shost->host_no;
		attached_sas_addr[PHY_NO_OFF] = phy_no;
		memcpy(sas_phy->attached_sas_addr,
		       attached_sas_addr,
		       SAS_ADDR_SIZE);
		memcpy(sas_phy->frame_rcvd, fis,
		       sizeof(struct dev_to_host_fis));
		phy->phy_type |= PORT_TYPE_SATA;
		phy->identify.device_type = SAS_SATA_DEV;
		phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
		phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
	} else {
		u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
		struct sas_identify_frame *id =
			(struct sas_identify_frame *)frame_rcvd;

		dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
		/* copy the received IDENTIFY frame, un-byte-swapping it */
		for (i = 0; i < IDENTIFY_REG_READ; i++) {
			u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
						       RX_IDAF_DWORD0 + (i * 4));

			frame_rcvd[i] = __swab32(idaf);
		}
		sas_phy->oob_mode = SAS_OOB_MODE;
		memcpy(sas_phy->attached_sas_addr,
		       &id->sas_addr,
		       SAS_ADDR_SIZE);
		phy->phy_type |= PORT_TYPE_SAS;
		phy->identify.device_type = id->dev_type;
		phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
		if (phy->identify.device_type == SAS_END_DEVICE)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SSP;
		else if (phy->identify.device_type != SAS_PHY_UNUSED)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SMP;
	}

	phy->port_id = port_id;
	spin_lock(&phy->lock);
	/* Delete timer and set phy_attached atomically */
	timer_delete(&phy->timer);
	phy->phy_attached = 1;
	spin_unlock(&phy->lock);

	/*
	 * Call pm_runtime_get_noresume() which pairs with
	 * hisi_sas_phyup_pm_work() -> pm_runtime_put_sync().
	 * For failure call pm_runtime_put() as we are in a hardirq context.
	 */
	pm_runtime_get_noresume(dev);
	res = hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM);
	if (!res)
		pm_runtime_put(dev);

	res = IRQ_HANDLED;

end:
	if (phy->reset_completion)
		complete(phy->reset_completion);
	/* ack the phy-enable interrupt and unmask it again */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_PHY_ENABLE_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);

	return res;
}

/*
 * Handle a phy-down channel interrupt: notify libsas, clear the
 * wide-port (CTA) bit and set CT3 so the next connection renegotiates,
 * then ack and re-enable the not-ready interrupt.
 */
static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 phy_state, sl_ctrl, txid_auto;
	struct device *dev = hisi_hba->dev;

	atomic_inc(&phy->down_cnt);

	timer_delete(&phy->timer);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);

	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
	dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
	hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0,
			  GFP_ATOMIC);

	sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
						sl_ctrl&(~SL_CTA_MSK));

	txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
	hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
						txid_auto | CT3_MSK);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);

	return IRQ_HANDLED;
}

/*
 * Handle a BROADCAST primitive: forward to libsas only when the
 * received primitive status indicates a change broadcast.
 */
static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 bcast_status;

	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
	bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
	if (bcast_status & RX_BCAST_CHG_MSK)
		hisi_sas_phy_bcast(phy);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_RX_BCST_ACK_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);

	return IRQ_HANDLED;
}

/*
 * Top-level CHL_INT0 dispatcher: one status nibble per phy in
 * CHNL_INT_STATUS (hence the 0x11111111 mask and >>= 4 walk).
 * Routes phy-up / broadcast / phy-down to their handlers based on the
 * phy's current ready state.
 */
static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;
	irqreturn_t res = IRQ_NONE;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
				& 0x11111111;
	while (irq_msk) {
		if (irq_msk & 1) {
			u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
							    CHL_INT0);
			u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
			int rdy = phy_state & (1 << phy_no);

			if (rdy) {
				if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
					/* phy up */
					if (phy_up_v3_hw(phy_no, hisi_hba)
							== IRQ_HANDLED)
						res = IRQ_HANDLED;
				if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK)
					/* phy bcast */
					if (phy_bcast_v3_hw(phy_no, hisi_hba)
							== IRQ_HANDLED)
						res = IRQ_HANDLED;
			} else {
				if (irq_value & CHL_INT0_NOT_RDY_MSK)
					/* phy down */
					if (phy_down_v3_hw(phy_no, hisi_hba)
					    == IRQ_HANDLED)
						res = IRQ_HANDLED;
			}
		}
		/* one status nibble per phy */
		irq_msk >>= 4;
		phy_no++;
	}

	return res;
}

/* Per-phy CHL_INT1 error sources: DMA-channel ECC, AXI and FIFO errors */
static const struct hisi_sas_hw_error port_axi_error[] = {
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF),
		.msg = "dmac_tx_ecc_bad_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF),
		.msg = "dmac_rx_ecc_bad_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
		.msg = "dma_tx_axi_wr_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
		.msg = "dma_tx_axi_rd_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
		.msg = "dma_rx_axi_wr_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
		.msg = "dma_rx_axi_rd_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF),
		.msg = "dma_tx_fifo_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF),
		.msg = "dma_rx_fifo_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF),
		.msg = "dma_tx_axi_ruser_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF),
		.msg = "dma_rx_axi_ruser_err",
	},
};

/*
 * handle_chl_int1_v3_hw - service per-phy CHL_INT1 (DMA/AXI/FIFO) errors.
 *
 * Every unmasked error source is logged and schedules the HBA reset work;
 * the handled sources are then acked by writing them back to CHL_INT1.
 */
static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1);
	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1_MSK);
	struct device *dev = hisi_hba->dev;
	int i;

	/* Ignore sources that are currently masked */
	irq_value &= ~irq_msk;
	if (!irq_value) {
		dev_warn(dev, "phy%d channel int 1 received with status bits cleared\n",
			 phy_no);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
		const struct hisi_sas_hw_error *error = &port_axi_error[i];

		if (!(irq_value & error->irq_msk))
			continue;

		dev_err(dev, "%s error (phy%d 0x%x) found!\n",
			error->msg, phy_no, irq_value);
		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
	}

	/* Ack all handled sources */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value);
}

/*
 * phy_get_events_v3_hw - fold the hardware link-error counters for
 * @phy_no into the sas_phy / hisi_sas_phy statistics.
 *
 * NOTE(review): the counter registers appear to be read-and-accumulate
 * here; whether the hardware clears them on read is not visible in this
 * file - confirm against the register documentation.
 */
static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	unsigned long flags;
	u32 reg_value;

	spin_lock_irqsave(&phy->lock, flags);

	/* loss dword sync */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
	sphy->loss_of_dword_sync_count += reg_value;

	/* phy reset problem */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
	sphy->phy_reset_problem_count += reg_value;

	/* invalid dword */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
	sphy->invalid_dword_count += reg_value;

	/* disparity err */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
	sphy->running_disparity_error_count += reg_value;

	/* code violation error */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);
	phy->code_violation_err_count += reg_value;

	spin_unlock_irqrestore(&phy->lock, flags);
}

/*
 * handle_chl_int2_v3_hw - service per-phy CHL_INT2 events: identify-frame
 * timeout, STP link timeout and receive-path error counters.
 *
 * Behaviour differs by controller revision: >0x20 parts report RX error
 * counters here; ==0x20 parts need a HILINK_ERR_DFX poll on invalid-dword
 * events before deciding to reset the link.
 */
static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
	u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct pci_dev *pci_dev = hisi_hba->pci_dev;
	struct device *dev = hisi_hba->dev;
	static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
			       BIT(CHL_INT2_RX_CODE_ERR_OFF) |
			       BIT(CHL_INT2_RX_INVLD_DW_OFF);

	irq_value &= ~irq_msk;
	if (!irq_value) {
		dev_warn(dev, "phy%d channel int 2 received with status bits cleared\n",
			 phy_no);
		return;
	}

	if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
		dev_warn(dev, "phy%d identify timeout\n", phy_no);
		hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
		u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
						    STP_LINK_TIMEOUT_STATE);

		dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
			 phy_no, reg_value);
		if (reg_value & BIT(LINK_RESET_TIMEOUT_OFF))
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	if (pci_dev->revision > 0x20 && (irq_value & msk)) {
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct sas_phy *sphy = sas_phy->phy;

		/* Refresh the accumulated counters before reporting them */
		phy_get_events_v3_hw(hisi_hba, phy_no);

		if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF))
			dev_info(dev, "phy%d invalid dword cnt:   %u\n", phy_no,
				 sphy->invalid_dword_count);

		if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF))
			dev_info(dev, "phy%d code violation cnt:  %u\n", phy_no,
				 phy->code_violation_err_count);

		if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF))
			dev_info(dev, "phy%d disparity error cnt: %u\n", phy_no,
				 sphy->running_disparity_error_count);
	}

	if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
	    (pci_dev->revision == 0x20)) {
		u32 reg_value;
		int rc;

		/* Wait for the hilink error to clear; reset the link if not */
		rc = hisi_sas_read32_poll_timeout_atomic(
			HILINK_ERR_DFX, reg_value,
			!((reg_value >> 8) & BIT(phy_no)),
			1000, 10000);
		if (rc)
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	/* Ack all handled sources */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value);
}

/*
 * handle_chl_int0_v3_hw - service the remaining CHL_INT0 sources (OOB
 * ready); phy up/down/broadcast bits are handled by their own interrupt
 * and are deliberately NOT acked here.
 */
static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0);

	if (irq_value0 & CHL_INT0_PHY_RDY_MSK)
		hisi_sas_phy_oob_ready(hisi_hba, phy_no);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     irq_value0 & (~CHL_INT0_SL_RX_BCST_ACK_MSK)
			     & (~CHL_INT0_SL_PHY_ENABLE_MSK)
			     & (~CHL_INT0_NOT_RDY_MSK));
}

/*
 * int_chnl_int_v3_hw - hardirq handler for the per-phy channel interrupts.
 *
 * CHNL_INT_STATUS packs CHNL_WIDTH status bits per phy; dispatch each of
 * the INT0/INT1/INT2 summary bits to its dedicated handler, then clear
 * that phy's bits from the local copy and move on.
 */
static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) & CHNL_INT_STS_MSK;

	while (irq_msk) {
		if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH)))
			handle_chl_int0_v3_hw(hisi_hba, phy_no);

		if (irq_msk & (CHNL_INT_STS_INT1_MSK << (phy_no * CHNL_WIDTH)))
			handle_chl_int1_v3_hw(hisi_hba, phy_no);

		if (irq_msk & (CHNL_INT_STS_INT2_MSK << (phy_no * CHNL_WIDTH)))
			handle_chl_int2_v3_hw(hisi_hba, phy_no);

		irq_msk &= ~(CHNL_INT_STS_PHY_MSK << (phy_no * CHNL_WIDTH));
		phy_no++;
	}

	return IRQ_HANDLED;
}

/*
 * Multi-bit (uncorrectable) ECC error sources: which SAS_ECC_INTR bit
 * flags the error, and which register/field holds the failing memory
 * address for the diagnostic print.
 */
static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
	{
		.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
		.msk = HGC_DQE_ECC_MB_ADDR_MSK,
		.shift = HGC_DQE_ECC_MB_ADDR_OFF,
		.msg = "hgc_dqe_eccbad_intr",
		.reg = HGC_DQE_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
		.msk = HGC_IOST_ECC_MB_ADDR_MSK,
		.shift = HGC_IOST_ECC_MB_ADDR_OFF,
		.msg = "hgc_iost_eccbad_intr",
		.reg = HGC_IOST_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
		.msk = HGC_ITCT_ECC_MB_ADDR_MSK,
		.shift = HGC_ITCT_ECC_MB_ADDR_OFF,
		.msg = "hgc_itct_eccbad_intr",
		.reg = HGC_ITCT_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
		.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
		.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
		.msg = "hgc_iostl_eccbad_intr",
		.reg = HGC_LM_DFX_STATUS2,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
		.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
		.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
		.msg = "hgc_itctl_eccbad_intr",
		.reg = HGC_LM_DFX_STATUS2,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
		.msk = HGC_CQE_ECC_MB_ADDR_MSK,
		.shift = HGC_CQE_ECC_MB_ADDR_OFF,
		.msg = "hgc_cqe_eccbad_intr",
		.reg = HGC_CQE_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
		.msg = "rxm_mem0_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
		.msg = "rxm_mem1_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
		.msg = "rxm_mem2_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
		.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
		.msg = "rxm_mem3_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS15,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF),
		.msk = AM_ROB_ECC_ERR_ADDR_MSK,
		.shift = AM_ROB_ECC_ERR_ADDR_OFF,
		.msg = "ooo_ram_eccbad_intr",
		.reg = AM_ROB_ECC_ERR_ADDR,
	},
};

/*
 * multi_bit_ecc_error_process_v3_hw - log each flagged uncorrectable ECC
 * error (with the failing memory address extracted from its DFX register)
 * and schedule a controller reset for every hit.
 */
static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba,
					      u32 irq_value)
{
	struct device *dev = hisi_hba->dev;
	const struct hisi_sas_hw_error *ecc_error;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) {
		ecc_error = &multi_bit_ecc_errors[i];
		if (irq_value & ecc_error->irq_msk) {
			val = hisi_sas_read32(hisi_hba, ecc_error->reg);
			val &= ecc_error->msk;
			val >>= ecc_error->shift;
			dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n",
				ecc_error->msg, irq_value, val);
			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
		}
	}
}

/*
 * fatal_ecc_int_v3_hw - read and process pending ECC interrupt status.
 *
 * All ECC sources are masked while the status is sampled and acked, then
 * the previous mask is restored.
 */
static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba)
{
	u32 irq_value, irq_msk;

	irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);

	irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
	if (irq_value)
		multi_bit_ecc_error_process_v3_hw(hisi_hba, irq_value);

	hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
}

/* Sub-decode tables for HGC_AXI_FIFO_ERR_INFO; terminated by empty entry */
static const struct hisi_sas_hw_error axi_error[] = {
	{ .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
	{ .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
	{ .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
	{ .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
	{ .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
	{ .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
	{ .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
	{ .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
	{}
};

static const struct hisi_sas_hw_error fifo_error[] = {
	{ .msk = BIT(8),  .msg = "CQE_WINFO_FIFO" },
	{ .msk = BIT(9),  .msg = "CQE_MSG_FIFIO" },
	{ .msk = BIT(10), .msg = "GETDQE_FIFO" },
	{ .msk = BIT(11), .msg = "CMDP_FIFO" },
	{ .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
	{}
};

/*
 * Fatal ENT_INT_SRC3 sources; AXI and FIFO entries carry a sub-table
 * decoded from HGC_AXI_FIFO_ERR_INFO.
 */
static const struct hisi_sas_hw_error fatal_axi_error[] = {
	{
		.irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
		.msg = "write pointer and depth",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
		.msg = "iptt no match slot",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
		.msg = "read pointer and depth",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
		.reg = HGC_AXI_FIFO_ERR_INFO,
		.sub = axi_error,
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
		.reg = HGC_AXI_FIFO_ERR_INFO,
		.sub = fifo_error,
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
		.msg = "LM add/fetch list",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
		.msg = "SAS_HGC_ABT fetch LM list",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_DQE_POISON_OFF),
		.msg = "read dqe poison",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_IOST_POISON_OFF),
		.msg = "read iost poison",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_ITCT_POISON_OFF),
		.msg = "read itct poison",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF),
		.msg = "read itct ncq poison",
	},

};

/*
 * fatal_axi_int_v3_hw - hardirq handler for fatal AXI/list-manager errors
 * (ENT_INT_SRC3) and the ITCT-clear completion.
 *
 * Fatal sources are masked (the 0x1df00 bits) while handled; each hit is
 * logged and schedules the HBA reset work.  Revision <0x21 parts
 * additionally request an AXI master shutdown.  ECC status is processed
 * on every invocation, and ITC_INT completes the pending ITCT clear.
 */
static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
{
	u32 irq_value, irq_msk;
	struct hisi_hba *hisi_hba = p;
	struct device *dev = hisi_hba->dev;
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int i;

	irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);

	irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
	irq_value &= ~irq_msk;

	for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
		const struct hisi_sas_hw_error *error = &fatal_axi_error[i];

		if (!(irq_value & error->irq_msk))
			continue;

		if (error->sub) {
			const struct hisi_sas_hw_error *sub = error->sub;
			u32 err_value = hisi_sas_read32(hisi_hba, error->reg);

			for (; sub->msk || sub->msg; sub++) {
				if (!(err_value & sub->msk))
					continue;

				dev_err(dev, "%s error (0x%x) found!\n",
					sub->msg, irq_value);
				queue_work(hisi_hba->wq, &hisi_hba->rst_work);
			}
		} else {
			dev_err(dev, "%s error (0x%x) found!\n",
				error->msg, irq_value);
			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
		}

		if (pdev->revision < 0x21) {
			u32 reg_val;

			/* Older silicon: force an AXI master shutdown */
			reg_val = hisi_sas_read32(hisi_hba,
						  AXI_MASTER_CFG_BASE +
						  AM_CTRL_GLOBAL);
			reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
			hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
					 AM_CTRL_GLOBAL, reg_val);
		}
	}

	fatal_ecc_int_v3_hw(hisi_hba);

	if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
		u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
		u32 dev_id = reg_val & ITCT_DEV_MSK;
		struct hisi_sas_device *sas_dev =
				&hisi_hba->devices[dev_id];

		hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
		dev_dbg(dev, "clear ITCT ok\n");
		complete(sas_dev->completion);
	}

	/* Ack the fatal sources and restore the previous mask */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);

	return IRQ_HANDLED;
}

/*
 * is_ncq_err_v3_hw - true if this completion header describes an NCQ
 * error: an SDB FIS received in the response-frame phase with the ATA
 * status ERR bit set.
 */
static bool is_ncq_err_v3_hw(struct hisi_sas_complete_v3_hdr *complete_hdr)
{
	u32 dw0, dw3;

	dw0 = le32_to_cpu(complete_hdr->dw0);
	dw3 = le32_to_cpu(complete_hdr->dw3);

	return (dw0 & ERR_PHASE_RESPONSE_FRAME_REV_STAGE_MSK) &&
	       (dw3 & FIS_TYPE_SDB_MSK) &&
	       (dw3 & FIS_ATA_STATUS_ERR_MSK);
}

/*
 * slot_err_v3_hw - translate an erroneous completion into libsas task
 * status.
 *
 * Returns true when the error has been classified into @ts (or the slot
 * marked for abort); returns false for an SSP data underflow whose
 * response IU was still transferred, in which case the caller should
 * parse the response IU instead.
 */
static bool
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
	       struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct hisi_sas_complete_v3_hdr *complete_queue =
			hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
			&complete_queue[slot->cmplt_queue_slot];
	struct hisi_sas_err_record_v3 *record =
			hisi_sas_status_buf_addr_mem(slot);
	u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
	u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
	u16 sipc_rx_err_type = le16_to_cpu(record->sipc_rx_err_type);
	u32 dw3 = le32_to_cpu(complete_hdr->dw3);
	u32 dw0 = le32_to_cpu(complete_hdr->dw0);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			/*
			 * If returned response frame is incorrect because of data underflow,
			 * but I/O information has been written to the host memory, we examine
			 * response IU.
			 */
			if (!(dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) &&
			    (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))
				return false;

			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			ts->stat = SAS_QUEUE_FULL;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		if ((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
		    (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) {
			if (task->ata_task.use_ncq) {
				struct domain_device *device = task->dev;
				struct hisi_sas_device *sas_dev = device->lldd_dev;

				/* NCQ error: abort the whole link, not one IO */
				sas_dev->dev_status = HISI_SAS_DEV_NCQ_ERR;
				slot->abort = 1;
			} else {
				ts->stat = SAS_PROTO_RESPONSE;
			}
		} else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if ((dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) ||
			   (dw3 & SATA_DISK_IN_ERROR_STATUS_MSK)) {
			ts->stat = SAS_PHY_DOWN;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
			hisi_sas_sata_done(task, slot);
		break;
	case SAS_PROTOCOL_SMP:
		ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
		break;
	default:
		break;
	}
	return true;
}

/*
 * slot_complete_v3_hw - finish one completed slot: translate the
 * completion header into libsas task status and hand the task back.
 */
static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
				struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	struct task_status_struct *ts;
	struct domain_device *device;
	struct sas_ha_struct *ha;
	struct hisi_sas_complete_v3_hdr *complete_queue =
			hisi_hba->complete_hdr[slot->cmplt_queue];
	struct
       hisi_sas_complete_v3_hdr *complete_hdr =
			&complete_queue[slot->cmplt_queue_slot];
	unsigned long flags;
	bool is_internal = slot->is_internal;
	u32 dw0, dw1, dw3;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return;

	ts = &task->task_status;
	device = task->dev;
	ha = device->port->ha;
	sas_dev = device->lldd_dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	memset(ts, 0, sizeof(*ts));
	ts->resp = SAS_TASK_COMPLETE;

	if (unlikely(!sas_dev)) {
		dev_dbg(dev, "slot complete: port has not device\n");
		ts->stat = SAS_PHY_DOWN;
		goto out;
	}

	dw0 = le32_to_cpu(complete_hdr->dw0);
	dw1 = le32_to_cpu(complete_hdr->dw1);
	dw3 = le32_to_cpu(complete_hdr->dw3);

	/*
	 * Use SAS+TMF status codes
	 */
	switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) {
	case STAT_IO_ABORTED:
		/* this IO has been aborted by abort command */
		ts->stat = SAS_ABORTED_TASK;
		goto out;
	case STAT_IO_COMPLETE:
		/* internal abort command complete */
		ts->stat = TMF_RESP_FUNC_SUCC;
		goto out;
	case STAT_IO_NO_DEVICE:
		ts->stat = TMF_RESP_FUNC_COMPLETE;
		goto out;
	case STAT_IO_NOT_VALID:
		/*
		 * abort single IO, the controller can't find the IO
		 */
		ts->stat = TMF_RESP_FUNC_FAILED;
		goto out;
	default:
		break;
	}

	/* check for erroneous completion */
	if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
		u32 *error_info = hisi_sas_status_buf_addr_mem(slot);

		if (slot_err_v3_hw(hisi_hba, task, slot)) {
			if (ts->stat != SAS_DATA_UNDERRUN)
				dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d addr=%016llx CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
					 slot->idx, task, sas_dev->device_id,
					 SAS_ADDR(device->sas_addr),
					 dw0, dw1, complete_hdr->act, dw3,
					 error_info[0], error_info[1],
					 error_info[2], error_info[3]);
			if (unlikely(slot->abort)) {
				/* NCQ errors abort the whole ATA link */
				if (dev_is_sata(device) && task->ata_task.use_ncq)
					sas_ata_device_link_abort(device, true);
				else
					sas_task_abort(task);

				return;
			}
			goto out;
		}
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP: {
		/* Response IU follows the error record in the status buffer */
		struct ssp_response_iu *iu =
			hisi_sas_status_buf_addr_mem(slot) +
			sizeof(struct hisi_sas_err_record);

		sas_ssp_task_response(dev, task, iu);
		break;
	}
	case SAS_PROTOCOL_SMP: {
		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
		void *to = page_address(sg_page(sg_resp));

		ts->stat = SAS_SAM_STAT_GOOD;

		/* Copy the SMP response out of the status buffer */
		memcpy(to + sg_resp->offset,
		       hisi_sas_status_buf_addr_mem(slot) +
		       sizeof(struct hisi_sas_err_record),
		       sg_resp->length);
		break;
	}
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		ts->stat = SAS_SAM_STAT_GOOD;
		if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
			hisi_sas_sata_done(task, slot);
		break;
	default:
		ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (!slot->port->port_attached) {
		dev_warn(dev, "slot complete: port %d has removed\n",
			 slot->port->sas_port.id);
		ts->stat = SAS_PHY_DOWN;
	}

out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		dev_info(dev, "slot complete: task(%p) aborted\n", task);
		return;
	}
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, slot, true);

	if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
		/* While the HA is frozen, completions are not delivered */
		spin_lock_irqsave(&device->done_lock, flags);
		if (test_bit(SAS_HA_FROZEN, &ha->state)) {
			spin_unlock_irqrestore(&device->done_lock, flags);
			dev_info(dev, "slot complete: task(%p) ignored\n",
				 task);
			return;
		}
		spin_unlock_irqrestore(&device->done_lock, flags);
	}

	if (task->task_done)
		task->task_done(task);
}

/*
 * complete_v3_hw - drain one completion queue from the current read
 * pointer up to the hardware write pointer.
 *
 * Returns the number of entries that were pending when the drain started.
 * SATA disk-error completions abort the device link directly; everything
 * else is dispatched to slot_complete_v3_hw() by IPTT.
 */
static int complete_v3_hw(struct hisi_sas_cq *cq)
{
	struct hisi_sas_complete_v3_hdr *complete_queue;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	u32 rd_point, wr_point;
	int queue = cq->id;
	int completed;

	rd_point = cq->rd_point;
	complete_queue = hisi_hba->complete_hdr[queue];

	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
				   (0x14 * queue));
	completed = (wr_point + HISI_SAS_QUEUE_SLOTS - rd_point) % HISI_SAS_QUEUE_SLOTS;

	while (rd_point != wr_point) {
		struct hisi_sas_complete_v3_hdr *complete_hdr;
		struct device *dev = hisi_hba->dev;
		struct hisi_sas_slot *slot;
		u32 dw0, dw1, dw3;
		int iptt;

		complete_hdr = &complete_queue[rd_point];
		dw0 = le32_to_cpu(complete_hdr->dw0);
		dw1 = le32_to_cpu(complete_hdr->dw1);
		dw3 = le32_to_cpu(complete_hdr->dw3);

		iptt = dw1 & CMPLT_HDR_IPTT_MSK;
		if (unlikely((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) &&
		    (dw3 & CMPLT_HDR_SATA_DISK_ERR_MSK)) {
			int device_id = (dw1 & CMPLT_HDR_DEV_ID_MSK) >>
					CMPLT_HDR_DEV_ID_OFF;
			struct hisi_sas_itct *itct =
				&hisi_hba->itct[device_id];
			struct hisi_sas_device *sas_dev =
				&hisi_hba->devices[device_id];
			struct domain_device *device = sas_dev->sas_device;

			dev_err(dev, "erroneous completion disk err dev id=%d sas_addr=0x%llx CQ hdr: 0x%x 0x%x 0x%x 0x%x\n",
				device_id, itct->sas_addr, dw0, dw1,
				complete_hdr->act, dw3);

			if (is_ncq_err_v3_hw(complete_hdr))
				sas_dev->dev_status = HISI_SAS_DEV_NCQ_ERR;

			sas_ata_device_link_abort(device, true);
		} else if
			  (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
			slot = &hisi_hba->slot_info[iptt];
			slot->cmplt_queue_slot = rd_point;
			slot->cmplt_queue = queue;
			slot_complete_v3_hw(hisi_hba, slot);
		} else
			dev_err(dev, "IPTT %d is invalid, discard it.\n", iptt);

		if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
			rd_point = 0;
	}

	/* update rd_point */
	cq->rd_point = rd_point;
	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
	cond_resched();

	return completed;
}

/*
 * queue_complete_v3_hw - blk-mq poll entry point: drain @queue under its
 * poll lock and report how many completions were processed.
 */
static int queue_complete_v3_hw(struct Scsi_Host *shost, unsigned int queue)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct hisi_sas_cq *cq = &hisi_hba->cq[queue];
	int completed;

	spin_lock(&cq->poll_lock);
	completed = complete_v3_hw(cq);
	spin_unlock(&cq->poll_lock);

	return completed;
}

/* Threaded half of the CQ interrupt: drain the queue */
static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
{
	struct hisi_sas_cq *cq = p;

	complete_v3_hw(cq);

	return IRQ_HANDLED;
}

/* Hard half of the CQ interrupt: ack the source and wake the thread */
static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
{
	struct hisi_sas_cq *cq = p;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	int queue = cq->id;

	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);

	return IRQ_WAKE_THREAD;
}

/* devm action: release the PCI IRQ vectors on teardown */
static void hisi_sas_v3_free_vectors(void *data)
{
	struct pci_dev *pdev = data;

	pci_free_irq_vectors(pdev);
}

/*
 * interrupt_preinit_v3_hw - allocate MSI vectors with affinity spreading
 * and derive the number of interrupt-driven completion queues.
 */
static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
{
	/* Allocate all MSI vectors to avoid re-insertion issue */
	int max_msi = HISI_SAS_MSI_COUNT_V3_HW;
	int vectors, min_msi;
	struct Scsi_Host *shost = hisi_hba->shost;
	struct pci_dev *pdev = hisi_hba->pci_dev;
	struct irq_affinity desc = {
		.pre_vectors = BASE_VECTORS_V3_HW,
	};

	min_msi = MIN_AFFINE_VECTORS_V3_HW;
	vectors = pci_alloc_irq_vectors_affinity(pdev,
						 min_msi, max_msi,
						 PCI_IRQ_MSI |
						 PCI_IRQ_AFFINITY,
						 &desc);
	if (vectors < 0)
		return -ENOENT;

	/* iopoll queues are serviced without interrupts */
	hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW - hisi_hba->iopoll_q_cnt;
	shost->nr_hw_queues = hisi_hba->cq_nvecs + hisi_hba->iopoll_q_cnt;

	return devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
}

/*
 * interrupt_init_v3_hw - request the phy, channel, fatal and per-CQ
 * interrupts.  With interrupt convergence all CQs share one vector
 * (IRQF_SHARED); otherwise each CQ gets its own threaded IRQ.
 */
static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int rc, i;

	rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX),
			      int_phy_up_down_bcast_v3_hw, 0,
			      DRV_NAME " phy", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
		return -ENOENT;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_CHL_INDEX),
			      int_chnl_int_v3_hw, 0,
			      DRV_NAME " channel", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
		return -ENOENT;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_AXI_INDEX),
			      fatal_axi_int_v3_hw, 0,
			      DRV_NAME " fatal", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
		return -ENOENT;
	}

	if (hisi_sas_intr_conv)
		dev_info(dev, "Enable interrupt converge\n");

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		int nr = hisi_sas_intr_conv ? BASE_VECTORS_V3_HW :
					      BASE_VECTORS_V3_HW + i;
		unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED :
							      IRQF_ONESHOT;

		cq->irq_no = pci_irq_vector(pdev, nr);
		rc = devm_request_threaded_irq(dev, cq->irq_no,
					       cq_interrupt_v3_hw,
					       cq_thread_v3_hw,
					       irqflags,
					       DRV_NAME " cq", cq);
		if (rc) {
			dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
				i, rc);
			return -ENOENT;
		}
		cq->irq_mask = pci_irq_get_affinity(pdev, i + BASE_VECTORS_V3_HW);
		if (!cq->irq_mask) {
			dev_err(dev, "could not get cq%d irq affinity!\n", i);
			return -ENOENT;
		}
	}

	return 0;
}

/* Full hw bring-up: hardware init followed by interrupt setup */
static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
{
	int rc;

	rc = hw_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	rc = interrupt_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	return 0;
}

/*
 * phy_set_linkrate_v3_hw - program the maximum negotiable link rate for
 * @phy_no into PROG_PHY_LINK_RATE.
 */
static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
				   struct sas_phy_linkrates *r)
{
	enum sas_linkrate max = r->maximum_linkrate;
	u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, phy_no,
						     PROG_PHY_LINK_RATE);

	prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
	prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
			     prog_phy_link_rate);
}

/*
 * interrupt_disable_v3_hw - quiesce all interrupt sources: wait for
 * in-flight handlers, mask every OQ/ENT/ECC source and all per-phy
 * channel interrupts.
 */
static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
{
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int i;

	synchronize_irq(pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX));
	synchronize_irq(pci_irq_vector(pdev, IRQ_CHL_INDEX));
	synchronize_irq(pci_irq_vector(pdev, IRQ_AXI_INDEX));
	for (i = 0; i < hisi_hba->queue_count; i++)
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);

	for (i = 0; i < hisi_hba->cq_nvecs; i++)
		synchronize_irq(pci_irq_vector(pdev, i + BASE_VECTORS_V3_HW));

	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2,
			 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);

	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1);
	}
}

/* Return the raw PHY_STATE register (one ready bit per phy) */
static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
{
	return hisi_sas_read32(hisi_hba, PHY_STATE);
}

/*
 * disable_host_v3_hw - stop delivery queues and phys, request AXI master
 * shutdown and wait for the bus to go idle.  Returns 0 on success or the
 * poll-timeout error if the bus never idles.
 */
static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	u32 status, reg_val;
	int rc;

	hisi_sas_sync_poll_cqs(hisi_hba);
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);

	hisi_sas_stop_phys(hisi_hba);

	mdelay(HISI_SAS_DELAY_FOR_PHY_DISABLE);

	reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
				  AM_CTRL_GLOBAL);
	reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
			 AM_CTRL_GLOBAL, reg_val);

	/* wait until bus idle */
	rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
					  AM_CURR_TRANS_RETURN, status,
					  status == 0x3, 10, 100);
	if (rc) {
		dev_err(dev, "axi bus is not idle, rc=%d\n", rc);
		return rc;
	}

	return 0;
}

/*
 * soft_reset_v3_hw - controller soft reset: quiesce interrupts, disable
 * the host, re-initialise driver memory structures and re-run hw init.
 */
static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	interrupt_disable_v3_hw(hisi_hba);
	rc = disable_host_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "soft reset: disable host failed rc=%d\n", rc);
		return rc;
	}

	hisi_sas_init_mem(hisi_hba);

	return hw_init_v3_hw(hisi_hba);
}

/*
 * write_gpio_v3_hw - SGPIO TX support: write @reg_count dwords starting
 * at @reg_index into the SAS_GPIO_TX registers.  Only SAS_GPIO_REG_TX is
 * supported; the range is validated against the number of phys (4 phys
 * per register).
 */
static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
			    u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct device *dev = hisi_hba->dev;
	u32 *data = (u32 *)write_data;
	int i;

	switch (reg_type) {
	case SAS_GPIO_REG_TX:
		if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) {
			dev_err(dev, "write gpio: invalid reg range[%d, %d]\n",
				reg_index, reg_index + reg_count - 1);
			return -EINVAL;
		}

		for (i = 0; i < reg_count; i++)
			hisi_sas_write32(hisi_hba,
					 SAS_GPIO_TX_0_1 + (reg_index + i) * 4,
					 data[i]);
		break;
	default:
		dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
			reg_type);
		return -EINVAL;
	}

	return 0;
}

/*
 * wait_cmds_complete_timeout_v3_hw - poll CQE_SEND_CNT every @delay_ms
 * until it stops changing (all commands drained) or @timeout_ms elapses.
 */
static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
					     int delay_ms, int timeout_ms)
{
	struct device *dev = hisi_hba->dev;
	int entries, entries_old = 0, time;

	for (time = 0; time < timeout_ms; time += delay_ms) {
		entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT);
		if (entries == entries_old)
			break;

		entries_old = entries;
		msleep(delay_ms);
	}

	if (time >= timeout_ms) {
		dev_dbg(dev, "Wait commands complete timeout!\n");
		return;
	}

	dev_dbg(dev, "wait commands complete %dms\n", time);
}

/* sysfs: report whether interrupt convergence (module param) is enabled */
static ssize_t intr_conv_v3_hw_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv);
}
static DEVICE_ATTR_RO(intr_conv_v3_hw);

/*
 * config_intr_coal_v3_hw - program OQ interrupt coalescing; zero
 * ticks/count selects the 10us/10-entry defaults.  The registers must be
 * written with the phys stopped, so phys are bounced around the update.
 */
static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba)
{
	/* config those registers between enable and disable PHYs */
	hisi_sas_stop_phys(hisi_hba);
	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3);

	if (hisi_hba->intr_coal_ticks == 0 ||
	    hisi_hba->intr_coal_count == 0) {
		/* configure the interrupt coalescing timeout period 10us */
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0xa);
		/* configure the count of CQ entries 10 */
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0xa);
	} else {
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME,
				 hisi_hba->intr_coal_ticks);
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT,
				 hisi_hba->intr_coal_count);
	}
	phys_init_v3_hw(hisi_hba);
}

/* sysfs: current interrupt coalescing tick setting */
static ssize_t intr_coal_ticks_v3_hw_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba *hisi_hba = shost_priv(shost);

	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 hisi_hba->intr_coal_ticks);
}

/* sysfs: set the coalescing ticks (decimal, < 2^24) and re-program hw */
static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba *hisi_hba = shost_priv(shost);
	u32 intr_coal_ticks;
	int ret;

	ret = kstrtou32(buf, DECIMALISM_FLAG, &intr_coal_ticks);
	if (ret) {
		dev_err(dev, "Input data of interrupt coalesce unmatch\n");
		return -EINVAL;
	}

	if (intr_coal_ticks >= BIT(TICKS_BIT_INDEX)) {
		dev_err(dev, "intr_coal_ticks must be less than 2^24!\n");
		return -EINVAL;
	}

	hisi_hba->intr_coal_ticks = intr_coal_ticks;

	config_intr_coal_v3_hw(hisi_hba);

	return count;
}
static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw);

/* sysfs: current interrupt coalescing entry-count setting */
static ssize_t intr_coal_count_v3_hw_show(struct device *dev,
					  struct device_attribute
					  *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba *hisi_hba = shost_priv(shost);

	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 hisi_hba->intr_coal_count);
}

static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
					   struct device_attribute
					   *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba
/*
 * SCSI sdev configure callback: run the common libsas configuration,
 * then tie the SCSI device to the HBA with a runtime-PM device link
 * so the host stays resumed while the device is active.
 */
static int sdev_configure_v3_hw(struct scsi_device *sdev,
				struct queue_limits *lim)
{
	struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
	struct hisi_hba *hisi_hba = shost_priv(shost);
	int ret = hisi_sas_sdev_configure(sdev, lim);
	struct device *dev = hisi_hba->dev;

	if (ret)
		return ret;

	/* enclosure devices get no PM link; not a failure */
	if (sdev->type == TYPE_ENCLOSURE)
		return 0;

	/*
	 * If the link cannot be created, runtime PM could suspend the
	 * host under an active device — disable it instead.
	 */
	if (!device_link_add(&sdev->sdev_gendev, dev,
			     DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)) {
		if (pm_runtime_enabled(dev)) {
			dev_info(dev, "add device link failed, disable runtime PM for the host\n");
			pm_runtime_disable(dev);
		}
	}

	return 0;
}
/* Build a { "NAME", NAME } lookup entry from a register offset macro. */
#define HISI_SAS_DEBUGFS_REG(x) {#x, x}

/* One register-name lookup entry; tables are terminated by {}. */
struct hisi_sas_debugfs_reg_lu {
	char *name;	/* register name as printed in debugfs */
	int off;	/* register offset within its region */
};

/* Describes one dumpable register region. */
struct hisi_sas_debugfs_reg {
	const struct hisi_sas_debugfs_reg_lu *lu; /* name lookup table */
	int count;	/* number of 32-bit registers to dump */
	int base_off;	/* base offset of the region (0 for global) */
};
/* Per-PHY register region descriptor; offsets are relative to PORT_BASE. */
static const struct hisi_sas_debugfs_reg debugfs_port_reg = {
	.lu = debugfs_port_reg_lu,
	.count = PORT_REG_LENGTH,
	.base_off = PORT_BASE,
};
/* Global register region descriptor; .base_off is implicitly 0. */
static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
	.lu = debugfs_global_reg_lu,
	.count = GLOBAL_REG_LENGTH,
};
/*
 * Quiesce the controller before taking a debugfs snapshot: block new
 * SCSI requests, give in-flight commands a chance to complete, then
 * reject further commands, flush the CQs and gate command delivery.
 * Ordering here is deliberate — do not reorder.
 */
static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	scsi_block_requests(shost);
	wait_cmds_complete_timeout_v3_hw(hisi_hba, WAIT_RETRY, WAIT_TMROUT);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	hisi_sas_sync_cqs(hisi_hba);
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
}

/*
 * Undo debugfs_snapshot_prepare_v3_hw(): re-enable every delivery
 * queue, allow command submission again and unblock the SCSI host.
 */
static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	/* one enable bit per configured delivery queue */
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
			 (u32)((1ULL << hisi_hba->queue_count) - 1));

	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	scsi_unblock_requests(shost);
}
/*
 * Prepare one PHY for BIST: take the PHY down, program the per-rate
 * FFE (transmitter emphasis) values staged via debugfs, and disable
 * the analog loss-of-signal check so loopback patterns are accepted.
 */
static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba)
{
	u32 reg_val;
	int phy_no = hisi_hba->debugfs_bist_phy_no;
	int i;

	/* disable PHY */
	hisi_sas_phy_enable(hisi_hba, phy_no, 0);

	/* update FFE: one 32-bit register per FFE_CFG_MAX entry */
	for (i = 0; i < FFE_CFG_MAX; i++)
		hisi_sas_phy_write32(hisi_hba, phy_no, TXDEEMPH_G1 + (i * 0x4),
				     hisi_hba->debugfs_bist_ffe[phy_no][i]);

	/* disable ALOS */
	reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG);
	reg_val |= CFG_ALOS_CHK_DISABLE_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val);
}
#define SAS_PHY_BIST_CODE_INIT 0x1
#define SAS_PHY_BIST_CODE1_INIT 0X80
#define SAS_PHY_BIST_INIT_DELAY 100
#define SAS_PHY_BIST_LOOP_TEST_0 1
#define SAS_PHY_BIST_LOOP_TEST_1 2
/*
 * Start (@enable=true) or stop (@enable=false) a PHY BIST run using
 * the parameters previously staged in hisi_hba->debugfs_bist_*.
 * The register programming order (prep, link rate, mode, pattern,
 * delays, TX/RX enable) is deliberate — do not reorder.
 * Always returns 0.
 */
static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
{
	u32 reg_val, mode_tmp;
	u32 linkrate = hisi_hba->debugfs_bist_linkrate;
	u32 phy_no = hisi_hba->debugfs_bist_phy_no;
	u32 *ffe = hisi_hba->debugfs_bist_ffe[phy_no];
	u32 code_mode = hisi_hba->debugfs_bist_code_mode;
	u32 path_mode = hisi_hba->debugfs_bist_mode;
	u32 *fix_code = &hisi_hba->debugfs_bist_fixed_code[0];
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "BIST info:phy%d link_rate=%d code_mode=%d path_mode=%d ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n",
		 phy_no, linkrate, code_mode, path_mode,
		 ffe[FFE_SAS_1_5_GBPS], ffe[FFE_SAS_3_0_GBPS],
		 ffe[FFE_SAS_6_0_GBPS], ffe[FFE_SAS_12_0_GBPS],
		 ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS],
		 ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE],
		 fix_code[FIXED_CODE_1]);
	/* path_mode selects digital (1) vs serdes (0) loopback — TODO confirm */
	mode_tmp = path_mode ? SAS_PHY_BIST_LOOP_TEST_1 :
			SAS_PHY_BIST_LOOP_TEST_0;
	if (enable) {
		/* some preparations before bist test */
		hisi_sas_bist_test_prep_v3_hw(hisi_hba);

		/* set linkrate of bit test */
		reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
					      PROG_PHY_LINK_RATE);
		reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
		reg_val |= (linkrate << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
		hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
				     reg_val);

		/* set code mode of bit test (TX/RX still disabled here) */
		reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
					      SAS_PHY_BIST_CTRL);
		reg_val &= ~(CFG_BIST_MODE_SEL_MSK | CFG_LOOP_TEST_MODE_MSK |
			     CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
			     CFG_BIST_TEST_MSK);
		reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) |
			    (mode_tmp << CFG_LOOP_TEST_MODE_OFF) |
			    CFG_BIST_TEST_MSK);
		hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
				     reg_val);

		/* set the bist init value */
		if (code_mode == HISI_SAS_BIST_CODE_MODE_FIXED_DATA) {
			/* user-supplied fixed pattern */
			reg_val = hisi_hba->debugfs_bist_fixed_code[0];
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     SAS_PHY_BIST_CODE, reg_val);

			reg_val = hisi_hba->debugfs_bist_fixed_code[1];
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     SAS_PHY_BIST_CODE1, reg_val);
		} else {
			/* generated patterns still need a seed value */
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     SAS_PHY_BIST_CODE,
					     SAS_PHY_BIST_CODE_INIT);
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     SAS_PHY_BIST_CODE1,
					     SAS_PHY_BIST_CODE1_INIT);
		}

		/* let the pattern settle, then enable TX/RX BIST */
		mdelay(SAS_PHY_BIST_INIT_DELAY);
		reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
		hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
				     reg_val);

		/* clear error bit: reading SAS_BIST_ERR_CNT resets it */
		mdelay(SAS_PHY_BIST_INIT_DELAY);
		hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT);
	} else {
		/* disable bist test and recover it */
		hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba,
				phy_no, SAS_BIST_ERR_CNT);
		hisi_sas_bist_test_restore_v3_hw(hisi_hba);
	}

	return 0;
}
start_delivery_v3_hw, 3425 .phys_init = phys_init_v3_hw, 3426 .phy_start = start_phy_v3_hw, 3427 .phy_disable = disable_phy_v3_hw, 3428 .phy_hard_reset = phy_hard_reset_v3_hw, 3429 .phy_get_max_linkrate = phy_get_max_linkrate_v3_hw, 3430 .phy_set_linkrate = phy_set_linkrate_v3_hw, 3431 .dereg_device = dereg_device_v3_hw, 3432 .soft_reset = soft_reset_v3_hw, 3433 .get_phys_state = get_phys_state_v3_hw, 3434 .get_events = phy_get_events_v3_hw, 3435 .write_gpio = write_gpio_v3_hw, 3436 .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw, 3437 .debugfs_snapshot_regs = debugfs_snapshot_regs_v3_hw, 3438 }; 3439 3440 static int check_fw_info_v3_hw(struct hisi_hba *hisi_hba) 3441 { 3442 struct device *dev = hisi_hba->dev; 3443 3444 if (hisi_hba->n_phy < 0 || hisi_hba->n_phy > 8) { 3445 dev_err(dev, "invalid phy number from FW\n"); 3446 return -EINVAL; 3447 } 3448 3449 if (hisi_hba->queue_count < 0 || hisi_hba->queue_count > 16) { 3450 dev_err(dev, "invalid queue count from FW\n"); 3451 return -EINVAL; 3452 } 3453 3454 return 0; 3455 } 3456 3457 static struct Scsi_Host * 3458 hisi_sas_shost_alloc_pci(struct pci_dev *pdev) 3459 { 3460 struct Scsi_Host *shost; 3461 struct hisi_hba *hisi_hba; 3462 struct device *dev = &pdev->dev; 3463 3464 shost = scsi_host_alloc(&sht_v3_hw, sizeof(*hisi_hba)); 3465 if (!shost) { 3466 dev_err(dev, "shost alloc failed\n"); 3467 return NULL; 3468 } 3469 hisi_hba = shost_priv(shost); 3470 3471 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 3472 hisi_hba->hw = &hisi_sas_v3_hw; 3473 hisi_hba->pci_dev = pdev; 3474 hisi_hba->dev = dev; 3475 hisi_hba->shost = shost; 3476 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 3477 3478 if (prot_mask & ~HISI_SAS_PROT_MASK) 3479 dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n", 3480 prot_mask); 3481 else 3482 hisi_hba->prot_mask = prot_mask; 3483 3484 if (hisi_sas_get_fw_info(hisi_hba) < 0) 3485 goto err_out; 3486 3487 if (check_fw_info_v3_hw(hisi_hba) < 0) 3488 goto 
err_out; 3489 3490 if (experimental_iopoll_q_cnt < 0 || 3491 experimental_iopoll_q_cnt >= hisi_hba->queue_count) 3492 dev_err(dev, "iopoll queue count %d cannot exceed or equal 16, using default 0\n", 3493 experimental_iopoll_q_cnt); 3494 else 3495 hisi_hba->iopoll_q_cnt = experimental_iopoll_q_cnt; 3496 3497 if (hisi_sas_alloc(hisi_hba)) { 3498 hisi_sas_free(hisi_hba); 3499 goto err_out; 3500 } 3501 3502 return shost; 3503 err_out: 3504 scsi_host_put(shost); 3505 dev_err(dev, "shost alloc failed\n"); 3506 return NULL; 3507 } 3508 3509 static void debugfs_snapshot_cq_reg_v3_hw(struct hisi_hba *hisi_hba) 3510 { 3511 int queue_entry_size = hisi_hba->hw->complete_hdr_size; 3512 int dump_index = hisi_hba->debugfs_dump_index; 3513 int i; 3514 3515 for (i = 0; i < hisi_hba->queue_count; i++) 3516 memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr, 3517 hisi_hba->complete_hdr[i], 3518 HISI_SAS_QUEUE_SLOTS * queue_entry_size); 3519 } 3520 3521 static void debugfs_snapshot_dq_reg_v3_hw(struct hisi_hba *hisi_hba) 3522 { 3523 int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr); 3524 int dump_index = hisi_hba->debugfs_dump_index; 3525 int i; 3526 3527 for (i = 0; i < hisi_hba->queue_count; i++) { 3528 struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr; 3529 int j; 3530 3531 debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr; 3532 cmd_hdr = hisi_hba->cmd_hdr[i]; 3533 3534 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) 3535 memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j], 3536 queue_entry_size); 3537 } 3538 } 3539 3540 static void debugfs_snapshot_port_reg_v3_hw(struct hisi_hba *hisi_hba) 3541 { 3542 int dump_index = hisi_hba->debugfs_dump_index; 3543 const struct hisi_sas_debugfs_reg *port = &debugfs_port_reg; 3544 int i, phy_cnt; 3545 u32 offset; 3546 u32 *databuf; 3547 3548 for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) { 3549 databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data; 3550 for (i = 0; i < port->count; i++, databuf++) { 3551 offset = 
/* Snapshot the global register region (base offset 0) into the dump slot. */
static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
	int i;

	for (i = 0; i < debugfs_global_reg.count; i++, databuf++)
		*databuf = hisi_sas_read32(hisi_hba,
					   HISI_SAS_REG_MEM_SIZE * i);
}

/* Snapshot the AXI master register region into the dump slot. */
static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data;
	const struct hisi_sas_debugfs_reg *axi = &debugfs_axi_reg;
	int i;

	for (i = 0; i < axi->count; i++, databuf++)
		*databuf = hisi_sas_read32(hisi_hba,
					   HISI_SAS_REG_MEM_SIZE * i +
					   axi->base_off);
}

/* Snapshot the RAS register region into the dump slot. */
static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data;
	const struct hisi_sas_debugfs_reg *ras = &debugfs_ras_reg;
	int i;

	for (i = 0; i < ras->count; i++, databuf++)
		*databuf = hisi_sas_read32(hisi_hba,
					   HISI_SAS_REG_MEM_SIZE * i +
					   ras->base_off);
}
hisi_sas_itct); 3610 } 3611 } 3612 3613 static void debugfs_snapshot_iost_reg_v3_hw(struct hisi_hba *hisi_hba) 3614 { 3615 int dump_index = hisi_hba->debugfs_dump_index; 3616 int max_command_entries = HISI_SAS_MAX_COMMANDS; 3617 void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache; 3618 void *databuf = hisi_hba->debugfs_iost[dump_index].iost; 3619 struct hisi_sas_iost *iost; 3620 int i; 3621 3622 read_iost_itct_cache_v3_hw(hisi_hba, HISI_SAS_IOST_CACHE, cachebuf); 3623 3624 iost = hisi_hba->iost; 3625 3626 for (i = 0; i < max_command_entries; i++, iost++) { 3627 memcpy(databuf, iost, sizeof(struct hisi_sas_iost)); 3628 databuf += sizeof(struct hisi_sas_iost); 3629 } 3630 } 3631 3632 static const char * 3633 debugfs_to_reg_name_v3_hw(int off, int base_off, 3634 const struct hisi_sas_debugfs_reg_lu *lu) 3635 { 3636 for (; lu->name; lu++) { 3637 if (off == lu->off - base_off) 3638 return lu->name; 3639 } 3640 3641 return NULL; 3642 } 3643 3644 static bool debugfs_dump_is_generated_v3_hw(void *p) 3645 { 3646 return p ? 
/*
 * Print one snapshotted register region to a seq_file, one register
 * per line as "offset value [name]"; the name is omitted when the
 * offset is not in the region's lookup table.
 */
static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s,
				    const struct hisi_sas_debugfs_reg *reg)
{
	int i;

	for (i = 0; i < reg->count; i++) {
		/* offsets are printed relative to the region base */
		int off = i * HISI_SAS_REG_MEM_SIZE;
		const char *name;

		name = debugfs_to_reg_name_v3_hw(off, reg->base_off,
						 reg->lu);
		if (name)
			seq_printf(s, "0x%08x 0x%08x %s\n", off,
				   regs_val[i], name);
		else
			seq_printf(s, "0x%08x 0x%08x\n", off,
				   regs_val[i]);
	}
}
/*
 * Dump one table entry of @sz bytes as 64-bit hex words,
 * TWO_PARA_PER_LINE words per output line, prefixed by its index.
 */
static void debugfs_show_row_64_v3_hw(struct seq_file *s, int index,
				      int sz, __le64 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / BYTE_TO_DDW; i++, ptr++) {
		seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
		if (!(i % TWO_PARA_PER_LINE))
			seq_puts(s, "\n\t");
	}

	seq_puts(s, "\n");
}

/*
 * Dump one table entry of @sz bytes as 32-bit hex words,
 * FOUR_PARA_PER_LINE words per output line, prefixed by its index.
 */
static void debugfs_show_row_32_v3_hw(struct seq_file *s, int index,
				      int sz, __le32 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / BYTE_TO_DW; i++, ptr++) {
		seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
		if (!(i % FOUR_PARA_PER_LINE))
			seq_puts(s, "\n\t");
	}
	seq_puts(s, "\n");
}
__le32 *cmd_hdr = cmd_queue + 3790 sizeof(struct hisi_sas_cmd_hdr) * slot; 3791 3792 debugfs_show_row_32_v3_hw(s, slot, sizeof(struct hisi_sas_cmd_hdr), 3793 cmd_hdr); 3794 } 3795 3796 static int debugfs_dq_v3_hw_show(struct seq_file *s, void *p) 3797 { 3798 struct hisi_sas_debugfs_dq *debugfs_dq = s->private; 3799 int slot; 3800 3801 if (!debugfs_dump_is_generated_v3_hw(debugfs_dq->hdr)) 3802 return -EPERM; 3803 3804 for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) 3805 debugfs_dq_show_slot_v3_hw(s, slot, s->private); 3806 3807 return 0; 3808 } 3809 DEFINE_SHOW_ATTRIBUTE(debugfs_dq_v3_hw); 3810 3811 static int debugfs_iost_v3_hw_show(struct seq_file *s, void *p) 3812 { 3813 struct hisi_sas_debugfs_iost *debugfs_iost = s->private; 3814 struct hisi_sas_iost *iost = debugfs_iost->iost; 3815 int i, max_command_entries = HISI_SAS_MAX_COMMANDS; 3816 3817 if (!debugfs_dump_is_generated_v3_hw(iost)) 3818 return -EPERM; 3819 3820 for (i = 0; i < max_command_entries; i++, iost++) { 3821 __le64 *data = &iost->qw0; 3822 3823 debugfs_show_row_64_v3_hw(s, i, sizeof(*iost), data); 3824 } 3825 3826 return 0; 3827 } 3828 DEFINE_SHOW_ATTRIBUTE(debugfs_iost_v3_hw); 3829 3830 static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p) 3831 { 3832 struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private; 3833 struct hisi_sas_iost_itct_cache *iost_cache = 3834 debugfs_iost_cache->cache; 3835 u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * BYTE_TO_DW; 3836 int i, tab_idx; 3837 __le64 *iost; 3838 3839 if (!debugfs_dump_is_generated_v3_hw(iost_cache)) 3840 return -EPERM; 3841 3842 for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) { 3843 /* 3844 * Data struct of IOST cache: 3845 * Data[1]: BIT0~15: Table index 3846 * Bit16: Valid mask 3847 * Data[2]~[9]: IOST table 3848 */ 3849 tab_idx = (iost_cache->data[1] & 0xffff); 3850 iost = (__le64 *)iost_cache; 3851 3852 debugfs_show_row_64_v3_hw(s, tab_idx, cache_size, iost); 3853 } 3854 3855 return 0; 3856 
}
DEFINE_SHOW_ATTRIBUTE(debugfs_iost_cache_v3_hw);

/* seq_file show: dump the snapshotted ITCT (device context) table. */
static int debugfs_itct_v3_hw_show(struct seq_file *s, void *p)
{
	int i;
	struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
	struct hisi_sas_itct *itct = debugfs_itct->itct;

	/* -EPERM until a dump has actually been triggered/populated */
	if (!debugfs_dump_is_generated_v3_hw(itct))
		return -EPERM;

	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
		__le64 *data = &itct->qw0;

		debugfs_show_row_64_v3_hw(s, i, sizeof(*itct), data);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_itct_v3_hw);

/* seq_file show: dump the snapshotted on-chip ITCT cache entries. */
static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
	struct hisi_sas_iost_itct_cache *itct_cache =
		debugfs_itct_cache->cache;
	u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * BYTE_TO_DW;
	int i, tab_idx;
	__le64 *itct;

	if (!debugfs_dump_is_generated_v3_hw(itct_cache))
		return -EPERM;

	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
		/*
		 * Data struct of ITCT cache:
		 * Data[1]: BIT0~15: Table index
		 *          Bit16:  Valid mask
		 * Data[2]~[9]: ITCT table
		 */
		tab_idx = itct_cache->data[1] & 0xffff;
		itct = (__le64 *)itct_cache;

		debugfs_show_row_64_v3_hw(s, tab_idx, cache_size, itct);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw);

/*
 * Create the debugfs file tree for one dump slot (dump/<index>/...):
 * timestamp, global/axi/ras register dumps, per-port register dumps,
 * per-queue CQ/DQ dumps and the IOST/ITCT tables and caches.
 */
static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
{
	u64 *debugfs_timestamp;
	struct dentry *dump_dentry;
	struct dentry *dentry;
	char name[NAME_BUF_SIZE];
	int p;
	int c;
	int d;

	snprintf(name, NAME_BUF_SIZE, "%d", index);

	dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);

	debugfs_timestamp = &hisi_hba->debugfs_timestamp[index];

	debugfs_create_u64("timestamp", 0400, dump_dentry,
			   debugfs_timestamp);

	debugfs_create_file("global", 0400, dump_dentry,
			    &hisi_hba->debugfs_regs[index][DEBUGFS_GLOBAL],
			    &debugfs_global_v3_hw_fops);

	/* Create port dir and files */
	dentry = debugfs_create_dir("port", dump_dentry);
	for (p = 0; p < hisi_hba->n_phy; p++) {
		snprintf(name, NAME_BUF_SIZE, "%d", p);

		debugfs_create_file(name, 0400, dentry,
				    &hisi_hba->debugfs_port_reg[index][p],
				    &debugfs_port_v3_hw_fops);
	}

	/* Create CQ dir and files */
	dentry = debugfs_create_dir("cq", dump_dentry);
	for (c = 0; c < hisi_hba->queue_count; c++) {
		snprintf(name, NAME_BUF_SIZE, "%d", c);

		debugfs_create_file(name, 0400, dentry,
				    &hisi_hba->debugfs_cq[index][c],
				    &debugfs_cq_v3_hw_fops);
	}

	/* Create DQ dir and files */
	dentry = debugfs_create_dir("dq", dump_dentry);
	for (d = 0; d < hisi_hba->queue_count; d++) {
		snprintf(name, NAME_BUF_SIZE, "%d", d);

		debugfs_create_file(name, 0400, dentry,
				    &hisi_hba->debugfs_dq[index][d],
				    &debugfs_dq_v3_hw_fops);
	}

	debugfs_create_file("iost", 0400, dump_dentry,
			    &hisi_hba->debugfs_iost[index],
			    &debugfs_iost_v3_hw_fops);

	debugfs_create_file("iost_cache", 0400, dump_dentry,
			    &hisi_hba->debugfs_iost_cache[index],
			    &debugfs_iost_cache_v3_hw_fops);

	debugfs_create_file("itct", 0400, dump_dentry,
			    &hisi_hba->debugfs_itct[index],
			    &debugfs_itct_v3_hw_fops);

	debugfs_create_file("itct_cache", 0400, dump_dentry,
			    &hisi_hba->debugfs_itct_cache[index],
			    &debugfs_itct_cache_v3_hw_fops);

	debugfs_create_file("axi", 0400, dump_dentry,
			    &hisi_hba->debugfs_regs[index][DEBUGFS_AXI],
			    &debugfs_axi_v3_hw_fops);

	debugfs_create_file("ras", 0400, dump_dentry,
			    &hisi_hba->debugfs_regs[index][DEBUGFS_RAS],
			    &debugfs_ras_v3_hw_fops);
}

static ssize_t debugfs_trigger_dump_v3_hw_write(struct
file *file, 3986 const char __user *user_buf, 3987 size_t count, loff_t *ppos) 3988 { 3989 struct hisi_hba *hisi_hba = file->f_inode->i_private; 3990 char buf[DUMP_BUF_SIZE]; 3991 3992 if (count > DUMP_BUF_SIZE) 3993 return -EFAULT; 3994 3995 if (copy_from_user(buf, user_buf, count)) 3996 return -EFAULT; 3997 3998 if (buf[0] != '1') 3999 return -EFAULT; 4000 4001 down(&hisi_hba->sem); 4002 if (debugfs_snapshot_regs_v3_hw(hisi_hba)) { 4003 up(&hisi_hba->sem); 4004 return -EFAULT; 4005 } 4006 up(&hisi_hba->sem); 4007 4008 return count; 4009 } 4010 4011 static const struct file_operations debugfs_trigger_dump_v3_hw_fops = { 4012 .write = &debugfs_trigger_dump_v3_hw_write, 4013 .owner = THIS_MODULE, 4014 }; 4015 4016 enum { 4017 HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0, 4018 HISI_SAS_BIST_LOOPBACK_MODE_SERDES, 4019 HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, 4020 }; 4021 4022 static const struct { 4023 int value; 4024 char *name; 4025 } debugfs_loop_linkrate_v3_hw[] = { 4026 { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" }, 4027 { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" }, 4028 { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" }, 4029 { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" }, 4030 }; 4031 4032 static int debugfs_bist_linkrate_v3_hw_show(struct seq_file *s, void *p) 4033 { 4034 struct hisi_hba *hisi_hba = s->private; 4035 int i; 4036 4037 for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) { 4038 int match = (hisi_hba->debugfs_bist_linkrate == 4039 debugfs_loop_linkrate_v3_hw[i].value); 4040 4041 seq_printf(s, "%s%s%s ", match ? "[" : "", 4042 debugfs_loop_linkrate_v3_hw[i].name, 4043 match ? 
"]" : ""); 4044 } 4045 seq_puts(s, "\n"); 4046 4047 return 0; 4048 } 4049 4050 static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp, 4051 const char __user *buf, 4052 size_t count, loff_t *ppos) 4053 { 4054 struct seq_file *m = filp->private_data; 4055 struct hisi_hba *hisi_hba = m->private; 4056 char kbuf[BIST_BUF_SIZE] = {}, *pkbuf; 4057 bool found = false; 4058 int i; 4059 4060 if (hisi_hba->debugfs_bist_enable) 4061 return -EPERM; 4062 4063 if (count >= sizeof(kbuf)) 4064 return -EOVERFLOW; 4065 4066 if (copy_from_user(kbuf, buf, count)) 4067 return -EINVAL; 4068 4069 pkbuf = strstrip(kbuf); 4070 4071 for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) { 4072 if (!strncmp(debugfs_loop_linkrate_v3_hw[i].name, 4073 pkbuf, BIST_BUF_SIZE)) { 4074 hisi_hba->debugfs_bist_linkrate = 4075 debugfs_loop_linkrate_v3_hw[i].value; 4076 found = true; 4077 break; 4078 } 4079 } 4080 4081 if (!found) 4082 return -EINVAL; 4083 4084 return count; 4085 } 4086 DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_linkrate_v3_hw); 4087 4088 static const struct { 4089 int value; 4090 char *name; 4091 } debugfs_loop_code_mode_v3_hw[] = { 4092 { HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" }, 4093 { HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" }, 4094 { HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" }, 4095 { HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" }, 4096 { HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" }, 4097 { HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" }, 4098 { HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" }, 4099 { HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" }, 4100 { HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" }, 4101 { HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" }, 4102 { HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" }, 4103 { HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" }, 4104 }; 4105 4106 static int debugfs_bist_code_mode_v3_hw_show(struct seq_file *s, void *p) 4107 { 4108 struct hisi_hba *hisi_hba = s->private; 4109 int i; 4110 4111 for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) { 
		int match = (hisi_hba->debugfs_bist_code_mode ==
			     debugfs_loop_code_mode_v3_hw[i].value);

		seq_printf(s, "%s%s%s ", match ? "[" : "",
			   debugfs_loop_code_mode_v3_hw[i].name,
			   match ? "]" : "");
	}
	seq_puts(s, "\n");

	return 0;
}

/* Select the BIST code mode by name; rejected while BIST is enabled. */
static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp,
						  const char __user *buf,
						  size_t count,
						  loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct hisi_hba *hisi_hba = m->private;
	char kbuf[BIST_BUF_SIZE] = {}, *pkbuf;
	bool found = false;
	int i;

	/* configuration may not change while BIST is running */
	if (hisi_hba->debugfs_bist_enable)
		return -EPERM;

	if (count >= sizeof(kbuf))
		return -EINVAL;

	if (copy_from_user(kbuf, buf, count))
		return -EOVERFLOW;

	pkbuf = strstrip(kbuf);

	for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) {
		if (!strncmp(debugfs_loop_code_mode_v3_hw[i].name,
			     pkbuf, BIST_BUF_SIZE)) {
			hisi_hba->debugfs_bist_code_mode =
				debugfs_loop_code_mode_v3_hw[i].value;
			found = true;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	return count;
}
DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_code_mode_v3_hw);

/* Select which phy BIST runs on; rejected while BIST is enabled. */
static ssize_t debugfs_bist_phy_v3_hw_write(struct file *filp,
					    const char __user *buf,
					    size_t count, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct hisi_hba *hisi_hba = m->private;
	unsigned int phy_no;
	int val;

	if (hisi_hba->debugfs_bist_enable)
		return -EPERM;

	val = kstrtouint_from_user(buf, count, 0, &phy_no);
	if (val)
		return val;

	if (phy_no >= hisi_hba->n_phy)
		return -EINVAL;

	hisi_hba->debugfs_bist_phy_no = phy_no;

	return count;
}

static int debugfs_bist_phy_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;

	seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no);

	return 0;
}
DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_phy_v3_hw);

/* The BIST error counter is read-mostly: only writing 0 (clear) is allowed. */
static ssize_t debugfs_bist_cnt_v3_hw_write(struct file *filp,
					    const char __user *buf,
					    size_t count, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct hisi_hba *hisi_hba = m->private;
	unsigned int cnt;
	int val;

	if (hisi_hba->debugfs_bist_enable)
		return -EPERM;

	val = kstrtouint_from_user(buf, count, 0, &cnt);
	if (val)
		return val;

	/* only clearing the counter is supported */
	if (cnt)
		return -EINVAL;

	hisi_hba->debugfs_bist_cnt = 0;
	return count;
}

static int debugfs_bist_cnt_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;

	seq_printf(s, "%u\n", hisi_hba->debugfs_bist_cnt);

	return 0;
}
DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_cnt_v3_hw);

/* value/name pairs backing the BIST "loopback_mode" debugfs attribute */
static const struct {
	int value;
	char *name;
} debugfs_loop_modes_v3_hw[] = {
	{ HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
	{ HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
	{ HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
};

/* Show all loopback modes, bracketing the selected one with []. */
static int debugfs_bist_mode_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) {
		int match = (hisi_hba->debugfs_bist_mode ==
			     debugfs_loop_modes_v3_hw[i].value);

		seq_printf(s, "%s%s%s ", match ? "[" : "",
			   debugfs_loop_modes_v3_hw[i].name,
			   match ?
"]" : ""); 4251 } 4252 seq_puts(s, "\n"); 4253 4254 return 0; 4255 } 4256 4257 static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp, 4258 const char __user *buf, 4259 size_t count, loff_t *ppos) 4260 { 4261 struct seq_file *m = filp->private_data; 4262 struct hisi_hba *hisi_hba = m->private; 4263 char kbuf[BIST_BUF_SIZE] = {}, *pkbuf; 4264 bool found = false; 4265 int i; 4266 4267 if (hisi_hba->debugfs_bist_enable) 4268 return -EPERM; 4269 4270 if (count >= sizeof(kbuf)) 4271 return -EINVAL; 4272 4273 if (copy_from_user(kbuf, buf, count)) 4274 return -EOVERFLOW; 4275 4276 pkbuf = strstrip(kbuf); 4277 4278 for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) { 4279 if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf, 4280 BIST_BUF_SIZE)) { 4281 hisi_hba->debugfs_bist_mode = 4282 debugfs_loop_modes_v3_hw[i].value; 4283 found = true; 4284 break; 4285 } 4286 } 4287 4288 if (!found) 4289 return -EINVAL; 4290 4291 return count; 4292 } 4293 DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_mode_v3_hw); 4294 4295 static ssize_t debugfs_bist_enable_v3_hw_write(struct file *filp, 4296 const char __user *buf, 4297 size_t count, loff_t *ppos) 4298 { 4299 struct seq_file *m = filp->private_data; 4300 struct hisi_hba *hisi_hba = m->private; 4301 unsigned int enable; 4302 int val; 4303 4304 val = kstrtouint_from_user(buf, count, 0, &enable); 4305 if (val) 4306 return val; 4307 4308 if (enable > 1) 4309 return -EINVAL; 4310 4311 if (enable == hisi_hba->debugfs_bist_enable) 4312 return count; 4313 4314 val = debugfs_set_bist_v3_hw(hisi_hba, enable); 4315 if (val < 0) 4316 return val; 4317 4318 hisi_hba->debugfs_bist_enable = enable; 4319 4320 return count; 4321 } 4322 4323 static int debugfs_bist_enable_v3_hw_show(struct seq_file *s, void *p) 4324 { 4325 struct hisi_hba *hisi_hba = s->private; 4326 4327 seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable); 4328 4329 return 0; 4330 } 4331 DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_enable_v3_hw); 4332 4333 static const struct { 
4334 char *name; 4335 } debugfs_ffe_name_v3_hw[FFE_CFG_MAX] = { 4336 { "SAS_1_5_GBPS" }, 4337 { "SAS_3_0_GBPS" }, 4338 { "SAS_6_0_GBPS" }, 4339 { "SAS_12_0_GBPS" }, 4340 { "FFE_RESV" }, 4341 { "SATA_1_5_GBPS" }, 4342 { "SATA_3_0_GBPS" }, 4343 { "SATA_6_0_GBPS" }, 4344 }; 4345 4346 static ssize_t debugfs_v3_hw_write(struct file *filp, 4347 const char __user *buf, 4348 size_t count, loff_t *ppos) 4349 { 4350 struct seq_file *m = filp->private_data; 4351 u32 *val = m->private; 4352 int res; 4353 4354 res = kstrtouint_from_user(buf, count, 0, val); 4355 if (res) 4356 return res; 4357 4358 return count; 4359 } 4360 4361 static int debugfs_v3_hw_show(struct seq_file *s, void *p) 4362 { 4363 u32 *val = s->private; 4364 4365 seq_printf(s, "0x%x\n", *val); 4366 4367 return 0; 4368 } 4369 DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_v3_hw); 4370 4371 static ssize_t debugfs_phy_down_cnt_v3_hw_write(struct file *filp, 4372 const char __user *buf, 4373 size_t count, loff_t *ppos) 4374 { 4375 struct seq_file *s = filp->private_data; 4376 struct hisi_sas_phy *phy = s->private; 4377 unsigned int set_val; 4378 int res; 4379 4380 res = kstrtouint_from_user(buf, count, 0, &set_val); 4381 if (res) 4382 return res; 4383 4384 if (set_val > 0) 4385 return -EINVAL; 4386 4387 atomic_set(&phy->down_cnt, 0); 4388 4389 return count; 4390 } 4391 4392 static int debugfs_phy_down_cnt_v3_hw_show(struct seq_file *s, void *p) 4393 { 4394 struct hisi_sas_phy *phy = s->private; 4395 4396 seq_printf(s, "%d\n", atomic_read(&phy->down_cnt)); 4397 4398 return 0; 4399 } 4400 DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_phy_down_cnt_v3_hw); 4401 4402 enum fifo_dump_mode_v3_hw { 4403 FIFO_DUMP_FORVER = (1U << 0), 4404 FIFO_DUMP_AFTER_TRIGGER = (1U << 1), 4405 FIFO_DUMP_UNTILL_TRIGGER = (1U << 2), 4406 }; 4407 4408 enum fifo_trigger_mode_v3_hw { 4409 FIFO_TRIGGER_EDGE = (1U << 0), 4410 FIFO_TRIGGER_SAME_LEVEL = (1U << 1), 4411 FIFO_TRIGGER_DIFF_LEVEL = (1U << 2), 4412 }; 4413 4414 static int 
debugfs_is_fifo_config_valid_v3_hw(struct hisi_sas_phy *phy)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	/* signal_sel is a 4-bit hardware field */
	if (phy->fifo.signal_sel > 0xf) {
		dev_info(hisi_hba->dev, "Invalid signal select: %u\n",
			 phy->fifo.signal_sel);
		return -EINVAL;
	}

	switch (phy->fifo.dump_mode) {
	case FIFO_DUMP_FORVER:
	case FIFO_DUMP_AFTER_TRIGGER:
	case FIFO_DUMP_UNTILL_TRIGGER:
		break;
	default:
		dev_info(hisi_hba->dev, "Invalid dump mode: %u\n",
			 phy->fifo.dump_mode);
		return -EINVAL;
	}

	/* when FIFO_DUMP_FORVER, no need to check trigger_mode */
	if (phy->fifo.dump_mode == FIFO_DUMP_FORVER)
		return 0;

	switch (phy->fifo.trigger_mode) {
	case FIFO_TRIGGER_EDGE:
	case FIFO_TRIGGER_SAME_LEVEL:
	case FIFO_TRIGGER_DIFF_LEVEL:
		break;
	default:
		dev_info(hisi_hba->dev, "Invalid trigger mode: %u\n",
			 phy->fifo.trigger_mode);
		return -EINVAL;
	}
	return 0;
}

/*
 * Write the cached per-phy trace-FIFO configuration to the hardware.
 * The FIFO is disabled around the register updates and re-enabled last.
 */
static int debugfs_update_fifo_config_v3_hw(struct hisi_sas_phy *phy)
{
	u32 trigger_mode = phy->fifo.trigger_mode;
	u32 signal_sel = phy->fifo.signal_sel;
	u32 dump_mode = phy->fifo.dump_mode;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	int phy_no = phy->sas_phy.id;
	u32 reg_val;
	int res;

	/* Check the validity of trace FIFO configuration */
	res = debugfs_is_fifo_config_valid_v3_hw(phy);
	if (res)
		return res;

	reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
	/* Disable trace FIFO before update configuration */
	reg_val |= DFX_FIFO_CTRL_DUMP_DISABLE_MSK;

	/* Update trace FIFO configuration */
	reg_val &= ~(DFX_FIFO_CTRL_DUMP_MODE_MSK |
		     DFX_FIFO_CTRL_SIGNAL_SEL_MSK |
		     DFX_FIFO_CTRL_TRIGGER_MODE_MSK);

	reg_val |= ((trigger_mode << DFX_FIFO_CTRL_TRIGGER_MODE_OFF) |
		    (dump_mode << DFX_FIFO_CTRL_DUMP_MODE_OFF) |
		    (signal_sel << DFX_FIFO_CTRL_SIGNAL_SEL_OFF));
	hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, reg_val);

	hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_DUMP_MSK,
			     phy->fifo.dump_msk);

	hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_TRIGGER,
			     phy->fifo.trigger);

	hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_TRIGGER_MSK,
			     phy->fifo.trigger_msk);

	/* Enable trace FIFO after updated configuration */
	reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
	reg_val &= ~DFX_FIFO_CTRL_DUMP_DISABLE_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, reg_val);

	return 0;
}

/* debugfs store: writing a truthy value applies the cached FIFO config. */
static ssize_t debugfs_fifo_update_cfg_v3_hw_write(struct file *filp,
						   const char __user *buf,
						   size_t count, loff_t *ppos)
{
	struct hisi_sas_phy *phy = filp->private_data;
	bool update;
	int val;

	val = kstrtobool_from_user(buf, count, &update);
	if (val)
		return val;

	/* only "true" inputs trigger an update */
	if (update != 1)
		return -EINVAL;

	val = debugfs_update_fifo_config_v3_hw(phy);
	if (val)
		return val;

	return count;
}

static const struct file_operations debugfs_fifo_update_cfg_v3_hw_fops = {
	.open = simple_open,
	.write = debugfs_fifo_update_cfg_v3_hw_write,
	.owner = THIS_MODULE,
};

/*
 * Drain the phy's trace FIFO into phy->fifo.rd_data.  The FIFO is
 * disabled while reading and re-enabled afterwards.
 */
static void debugfs_read_fifo_data_v3_hw(struct hisi_sas_phy *phy)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	u32 *buf = phy->fifo.rd_data;
	int phy_no = phy->sas_phy.id;
	u32 val;
	int i;

	memset(buf, 0, sizeof(phy->fifo.rd_data));

	/* Disable trace FIFO before read data */
	val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
	val |= DFX_FIFO_CTRL_DUMP_DISABLE_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, val);

	for (i = 0; i < HISI_SAS_FIFO_DATA_DW_SIZE; i++) {
		val = hisi_sas_phy_read32(hisi_hba, phy_no,
					  DFX_FIFO_RD_DATA);
		buf[i] = val;
	}

	/* Enable trace FIFO after read data */
	val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
	val &= ~DFX_FIFO_CTRL_DUMP_DISABLE_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, val);
}

/* seq_file show: drain the trace FIFO and print it as 32-bit rows. */
static int debugfs_fifo_data_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_sas_phy *phy = s->private;

	debugfs_read_fifo_data_v3_hw(phy);

	debugfs_show_row_32_v3_hw(s, 0,
				  HISI_SAS_FIFO_DATA_DW_SIZE * HISI_SAS_REG_MEM_SIZE,
				  (__le32 *)phy->fifo.rd_data);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_fifo_data_v3_hw);

/*
 * Create the fifo/<phy> debugfs tree and seed each phy's cached FIFO
 * configuration from the current hardware register values.
 */
static void debugfs_fifo_init_v3_hw(struct hisi_hba *hisi_hba)
{
	int phy_no;

	hisi_hba->debugfs_fifo_dentry =
		debugfs_create_dir("fifo", hisi_hba->debugfs_dir);

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct dentry *port_dentry;
		char name[256];
		u32 val;

		/* get default configuration for trace FIFO */
		val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
		val &= DFX_FIFO_CTRL_DUMP_MODE_MSK;
		val >>= DFX_FIFO_CTRL_DUMP_MODE_OFF;
		phy->fifo.dump_mode = val;

		val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
		val &= DFX_FIFO_CTRL_TRIGGER_MODE_MSK;
		val >>= DFX_FIFO_CTRL_TRIGGER_MODE_OFF;
		phy->fifo.trigger_mode = val;

		val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
		val &= DFX_FIFO_CTRL_SIGNAL_SEL_MSK;
		val >>= DFX_FIFO_CTRL_SIGNAL_SEL_OFF;
		phy->fifo.signal_sel = val;

		val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_DUMP_MSK);
		phy->fifo.dump_msk = val;

		val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_TRIGGER);
		phy->fifo.trigger = val;
		val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_TRIGGER_MSK);
		phy->fifo.trigger_msk = val;

		snprintf(name, 256, "%d", phy_no);
		port_dentry = debugfs_create_dir(name,
						 hisi_hba->debugfs_fifo_dentry);

		debugfs_create_file("update_config", 0200, port_dentry, phy,
				    &debugfs_fifo_update_cfg_v3_hw_fops);

		debugfs_create_file("signal_sel", 0600, port_dentry,
				    &phy->fifo.signal_sel,
				    &debugfs_v3_hw_fops);

		debugfs_create_file("dump_msk", 0600, port_dentry,
				    &phy->fifo.dump_msk,
				    &debugfs_v3_hw_fops);

		debugfs_create_file("dump_mode", 0600, port_dentry,
				    &phy->fifo.dump_mode,
				    &debugfs_v3_hw_fops);

		debugfs_create_file("trigger_mode", 0600, port_dentry,
				    &phy->fifo.trigger_mode,
				    &debugfs_v3_hw_fops);

		debugfs_create_file("trigger", 0600, port_dentry,
				    &phy->fifo.trigger,
				    &debugfs_v3_hw_fops);

		debugfs_create_file("trigger_msk", 0600, port_dentry,
				    &phy->fifo.trigger_msk,
				    &debugfs_v3_hw_fops);

		debugfs_create_file("fifo_data", 0400, port_dentry, phy,
				    &debugfs_fifo_data_v3_hw_fops);
	}
}

/*
 * Free one dump slot's buffers and NULL the pointers so the "is dump
 * generated" checks in the show handlers fail cleanly afterwards.
 */
static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
{
	struct device *dev = hisi_hba->dev;
	int i;

	devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
	hisi_hba->debugfs_iost_cache[dump_index].cache = NULL;
	devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
	hisi_hba->debugfs_itct_cache[dump_index].cache = NULL;
	devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
	hisi_hba->debugfs_iost[dump_index].iost = NULL;
	devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);
	hisi_hba->debugfs_itct[dump_index].itct = NULL;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);
		hisi_hba->debugfs_dq[dump_index][i].hdr = NULL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		devm_kfree(dev,
			   hisi_hba->debugfs_cq[dump_index][i].complete_hdr);
		hisi_hba->debugfs_cq[dump_index][i].complete_hdr = NULL;
	}

	for (i = 0; i < DEBUGFS_REGS_NUM; i++) {
		devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);
		hisi_hba->debugfs_regs[dump_index][i].data = NULL;
	}

	for (i = 0; i < hisi_hba->n_phy; i++) {
		devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
		hisi_hba->debugfs_port_reg[dump_index][i].data = NULL;
	}
}

/* register-description table for the global/axi/ras register dumps */
static const struct hisi_sas_debugfs_reg *debugfs_reg_array_v3_hw[DEBUGFS_REGS_NUM] = {
	[DEBUGFS_GLOBAL] = &debugfs_global_reg,
	[DEBUGFS_AXI] = &debugfs_axi_reg,
	[DEBUGFS_RAS] = &debugfs_ras_reg,
};

/*
 * Allocate all buffers for one dump slot (registers, per-port regs,
 * CQ/DQ snapshots, IOST/ITCT tables and caches).  On any failure every
 * buffer already allocated for this slot is released again.
 */
static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
{
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	struct device *dev = hisi_hba->dev;
	int p, c, d, r;
	size_t sz;

	for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
		struct hisi_sas_debugfs_regs *regs =
			&hisi_hba->debugfs_regs[dump_index][r];

		sz = debugfs_reg_array_v3_hw[r]->count * HISI_SAS_REG_MEM_SIZE;
		regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!regs->data)
			goto fail;
		regs->hisi_hba = hisi_hba;
	}

	sz = debugfs_port_reg.count * HISI_SAS_REG_MEM_SIZE;
	for (p = 0; p < hisi_hba->n_phy; p++) {
		struct hisi_sas_debugfs_port *port =
			&hisi_hba->debugfs_port_reg[dump_index][p];

		port->data = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!port->data)
			goto fail;
		port->phy = &hisi_hba->phy[p];
	}

	sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
	for (c = 0; c < hisi_hba->queue_count; c++) {
		struct hisi_sas_debugfs_cq *cq =
			&hisi_hba->debugfs_cq[dump_index][c];

		cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!cq->complete_hdr)
			goto fail;
		cq->cq = &hisi_hba->cq[c];
	}

	sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
	for (d = 0; d < hisi_hba->queue_count; d++) {
		struct hisi_sas_debugfs_dq *dq =
			&hisi_hba->debugfs_dq[dump_index][d];

		dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!dq->hdr)
			goto fail;
		dq->dq = &hisi_hba->dq[d];
	}

	sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);

	hisi_hba->debugfs_iost[dump_index].iost =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_iost[dump_index].iost)
		goto fail;

	sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
	     sizeof(struct hisi_sas_iost_itct_cache);

	hisi_hba->debugfs_iost_cache[dump_index].cache =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_iost_cache[dump_index].cache)
		goto fail;

	sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
	     sizeof(struct hisi_sas_iost_itct_cache);

	hisi_hba->debugfs_itct_cache[dump_index].cache =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_itct_cache[dump_index].cache)
		goto fail;

	/* New memory allocation must be locate before itct */
	sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);

	hisi_hba->debugfs_itct[dump_index].itct =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_itct[dump_index].itct)
		goto fail;

	return 0;
fail:
	debugfs_release_v3_hw(hisi_hba, dump_index);
	return -ENOMEM;
}

/*
 * Take a full register/queue/table snapshot into the next free dump
 * slot.  Returns -EINVAL when all dump slots are used, -ENOMEM when
 * buffer allocation fails.
 */
static int debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
{
	int debugfs_dump_index = hisi_hba->debugfs_dump_index;
	struct device *dev = hisi_hba->dev;
	u64 timestamp = local_clock();

	if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
		dev_warn(dev, "dump count exceeded!\n");
		return -EINVAL;
	}

	if (debugfs_alloc_v3_hw(hisi_hba, debugfs_dump_index)) {
		dev_warn(dev, "failed to alloc memory\n");
		return -ENOMEM;
	}

	/* record the snapshot time in milliseconds */
	do_div(timestamp, NSEC_PER_MSEC);
	hisi_hba->debugfs_timestamp[debugfs_dump_index]
		= timestamp;

	debugfs_snapshot_prepare_v3_hw(hisi_hba);

	debugfs_snapshot_global_reg_v3_hw(hisi_hba);
	debugfs_snapshot_port_reg_v3_hw(hisi_hba);
	debugfs_snapshot_axi_reg_v3_hw(hisi_hba);
	debugfs_snapshot_ras_reg_v3_hw(hisi_hba);
	debugfs_snapshot_cq_reg_v3_hw(hisi_hba);
	debugfs_snapshot_dq_reg_v3_hw(hisi_hba);
	debugfs_snapshot_itct_reg_v3_hw(hisi_hba);
	debugfs_snapshot_iost_reg_v3_hw(hisi_hba);

	debugfs_snapshot_restore_v3_hw(hisi_hba);
	hisi_hba->debugfs_dump_index++;

	return 0;
}

/* Create phy_down_cnt/<phy> files, one per phy. */
static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct dentry *dir = debugfs_create_dir("phy_down_cnt",
						hisi_hba->debugfs_dir);
	char name[NAME_BUF_SIZE];
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		snprintf(name, NAME_BUF_SIZE, "%d", phy_no);
		debugfs_create_file(name, 0600, dir,
				    &hisi_hba->phy[phy_no],
				    &debugfs_phy_down_cnt_v3_hw_fops);
	}
}

/*
 * Create the bist/ debugfs tree: global BIST controls plus a per-phy
 * "ffe" directory of equalizer settings.  Defaults the link rate to
 * 1.5 Gbit.
 */
static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct dentry *ports_dentry;
	int phy_no;

	hisi_hba->debugfs_bist_dentry =
		debugfs_create_dir("bist", hisi_hba->debugfs_dir);
	debugfs_create_file("link_rate", 0600,
			    hisi_hba->debugfs_bist_dentry, hisi_hba,
			    &debugfs_bist_linkrate_v3_hw_fops);

	debugfs_create_file("code_mode", 0600,
			    hisi_hba->debugfs_bist_dentry, hisi_hba,
			    &debugfs_bist_code_mode_v3_hw_fops);

	debugfs_create_file("fixed_code", 0600,
			    hisi_hba->debugfs_bist_dentry,
			    &hisi_hba->debugfs_bist_fixed_code[0],
			    &debugfs_v3_hw_fops);

	debugfs_create_file("fixed_code_1", 0600,
			    hisi_hba->debugfs_bist_dentry,
			    &hisi_hba->debugfs_bist_fixed_code[1],
			    &debugfs_v3_hw_fops);

	debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
			    hisi_hba, &debugfs_bist_phy_v3_hw_fops);

	debugfs_create_file("cnt", 0600, hisi_hba->debugfs_bist_dentry,
			    hisi_hba, &debugfs_bist_cnt_v3_hw_fops);

	debugfs_create_file("loopback_mode", 0600,
			    hisi_hba->debugfs_bist_dentry,
			    hisi_hba, &debugfs_bist_mode_v3_hw_fops);

	debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry,
			    hisi_hba, &debugfs_bist_enable_v3_hw_fops);

	ports_dentry = debugfs_create_dir("port", hisi_hba->debugfs_bist_dentry);

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct dentry *port_dentry;
		struct dentry *ffe_dentry;
		char name[256];
		int i;

		snprintf(name, 256, "%d", phy_no);
		port_dentry = debugfs_create_dir(name, ports_dentry);
		ffe_dentry = debugfs_create_dir("ffe", port_dentry);
		for (i = 0; i < FFE_CFG_MAX; i++) {
			/* the reserved FFE slot gets no file */
			if (i == FFE_RESV)
				continue;
			debugfs_create_file(debugfs_ffe_name_v3_hw[i].name,
					    0600, ffe_dentry,
					    &hisi_hba->debugfs_bist_ffe[phy_no][i],
					    &debugfs_v3_hw_fops);
		}
	}

	hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
}

/* seq_file show: index of the most recent dump, or a hint when none exists. */
static int debugfs_dump_index_v3_hw_show(struct seq_file *s, void *p)
{
	int *debugfs_dump_index = s->private;

	if (*debugfs_dump_index > 0)
		seq_printf(s, "%d\n", *debugfs_dump_index - 1);
	else
		seq_puts(s, "dump not triggered\n");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_dump_index_v3_hw);

/* Create the dump/ tree: latest_dump plus one slot directory per dump. */
static void debugfs_dump_init_v3_hw(struct hisi_hba *hisi_hba)
{
	int i;

	hisi_hba->debugfs_dump_dentry =
		debugfs_create_dir("dump", hisi_hba->debugfs_dir);

	debugfs_create_file("latest_dump", 0400, hisi_hba->debugfs_dump_dentry,
			    &hisi_hba->debugfs_dump_index,
			    &debugfs_dump_index_v3_hw_fops);

	for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
		debugfs_create_files_v3_hw(hisi_hba, i);
}

static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
{
debugfs_remove_recursive(hisi_hba->debugfs_dir); 4914 hisi_hba->debugfs_dir = NULL; 4915 } 4916 4917 static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba) 4918 { 4919 struct device *dev = hisi_hba->dev; 4920 4921 hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), 4922 hisi_sas_debugfs_dir); 4923 /* create bist structures */ 4924 debugfs_bist_init_v3_hw(hisi_hba); 4925 4926 debugfs_dump_init_v3_hw(hisi_hba); 4927 4928 debugfs_phy_down_cnt_init_v3_hw(hisi_hba); 4929 debugfs_fifo_init_v3_hw(hisi_hba); 4930 debugfs_create_file("trigger_dump", 0200, 4931 hisi_hba->debugfs_dir, 4932 hisi_hba, 4933 &debugfs_trigger_dump_v3_hw_fops); 4934 } 4935 4936 static int 4937 hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) 4938 { 4939 struct Scsi_Host *shost; 4940 struct hisi_hba *hisi_hba; 4941 struct device *dev = &pdev->dev; 4942 struct asd_sas_phy **arr_phy; 4943 struct asd_sas_port **arr_port; 4944 struct sas_ha_struct *sha; 4945 int rc, phy_nr, port_nr, i; 4946 4947 rc = pcim_enable_device(pdev); 4948 if (rc) 4949 goto err_out; 4950 4951 pci_set_master(pdev); 4952 4953 rc = pcim_iomap_regions(pdev, 1 << BAR_NO_V3_HW, DRV_NAME); 4954 if (rc) 4955 goto err_out; 4956 4957 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 4958 if (rc) { 4959 dev_err(dev, "No usable DMA addressing method\n"); 4960 rc = -ENODEV; 4961 goto err_out; 4962 } 4963 4964 shost = hisi_sas_shost_alloc_pci(pdev); 4965 if (!shost) { 4966 rc = -ENOMEM; 4967 goto err_out; 4968 } 4969 4970 sha = SHOST_TO_SAS_HA(shost); 4971 hisi_hba = shost_priv(shost); 4972 dev_set_drvdata(dev, sha); 4973 4974 hisi_hba->regs = pcim_iomap_table(pdev)[BAR_NO_V3_HW]; 4975 if (!hisi_hba->regs) { 4976 dev_err(dev, "cannot map register\n"); 4977 rc = -ENOMEM; 4978 goto err_out_free_host; 4979 } 4980 4981 phy_nr = port_nr = hisi_hba->n_phy; 4982 4983 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 4984 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 4985 
if (!arr_phy || !arr_port) { 4986 rc = -ENOMEM; 4987 goto err_out_free_host; 4988 } 4989 4990 sha->sas_phy = arr_phy; 4991 sha->sas_port = arr_port; 4992 sha->shost = shost; 4993 sha->lldd_ha = hisi_hba; 4994 4995 shost->transportt = hisi_sas_stt; 4996 shost->max_id = HISI_SAS_MAX_DEVICES; 4997 shost->max_lun = ~0; 4998 shost->max_channel = 1; 4999 shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN; 5000 shost->can_queue = HISI_SAS_UNRESERVED_IPTT; 5001 shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; 5002 if (hisi_hba->iopoll_q_cnt) 5003 shost->nr_maps = 3; 5004 else 5005 shost->nr_maps = 1; 5006 5007 sha->sas_ha_name = DRV_NAME; 5008 sha->dev = dev; 5009 sha->sas_addr = &hisi_hba->sas_addr[0]; 5010 sha->num_phys = hisi_hba->n_phy; 5011 5012 for (i = 0; i < hisi_hba->n_phy; i++) { 5013 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 5014 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 5015 } 5016 5017 if (hisi_hba->prot_mask) { 5018 dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", 5019 prot_mask); 5020 scsi_host_set_prot(hisi_hba->shost, prot_mask); 5021 if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) 5022 scsi_host_set_guard(hisi_hba->shost, 5023 SHOST_DIX_GUARD_CRC); 5024 } 5025 5026 rc = interrupt_preinit_v3_hw(hisi_hba); 5027 if (rc) 5028 goto err_out_free_host; 5029 5030 rc = scsi_add_host(shost, dev); 5031 if (rc) 5032 goto err_out_free_host; 5033 5034 rc = sas_register_ha(sha); 5035 if (rc) 5036 goto err_out_remove_host; 5037 5038 rc = hisi_sas_v3_init(hisi_hba); 5039 if (rc) 5040 goto err_out_unregister_ha; 5041 5042 scsi_scan_host(shost); 5043 if (hisi_sas_debugfs_enable) 5044 debugfs_init_v3_hw(hisi_hba); 5045 5046 pm_runtime_set_autosuspend_delay(dev, 5000); 5047 pm_runtime_use_autosuspend(dev); 5048 /* 5049 * For the situation that there are ATA disks connected with SAS 5050 * controller, it additionally creates ata_port which will affect the 5051 * child_count of hisi_hba->dev. 
Even if suspended all the disks, 5052 * ata_port is still and the child_count of hisi_hba->dev is not 0. 5053 * So use pm_suspend_ignore_children() to ignore the effect to 5054 * hisi_hba->dev. 5055 */ 5056 pm_suspend_ignore_children(dev, true); 5057 pm_runtime_put_noidle(&pdev->dev); 5058 5059 return 0; 5060 5061 err_out_unregister_ha: 5062 sas_unregister_ha(sha); 5063 err_out_remove_host: 5064 scsi_remove_host(shost); 5065 err_out_free_host: 5066 hisi_sas_free(hisi_hba); 5067 scsi_host_put(shost); 5068 err_out: 5069 return rc; 5070 } 5071 5072 static void 5073 hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) 5074 { 5075 int i; 5076 5077 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX), hisi_hba); 5078 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_CHL_INDEX), hisi_hba); 5079 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_AXI_INDEX), hisi_hba); 5080 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 5081 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 5082 int nr = hisi_sas_intr_conv ? 
BASE_VECTORS_V3_HW : 5083 BASE_VECTORS_V3_HW + i; 5084 5085 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq); 5086 } 5087 } 5088 5089 static void hisi_sas_v3_remove(struct pci_dev *pdev) 5090 { 5091 struct device *dev = &pdev->dev; 5092 struct sas_ha_struct *sha = dev_get_drvdata(dev); 5093 struct hisi_hba *hisi_hba = sha->lldd_ha; 5094 struct Scsi_Host *shost = sha->shost; 5095 5096 pm_runtime_get_noresume(dev); 5097 if (hisi_sas_debugfs_enable) 5098 debugfs_exit_v3_hw(hisi_hba); 5099 5100 sas_unregister_ha(sha); 5101 flush_workqueue(hisi_hba->wq); 5102 sas_remove_host(shost); 5103 5104 hisi_sas_v3_destroy_irqs(pdev, hisi_hba); 5105 hisi_sas_free(hisi_hba); 5106 scsi_host_put(shost); 5107 } 5108 5109 static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev) 5110 { 5111 struct sas_ha_struct *sha = pci_get_drvdata(pdev); 5112 struct hisi_hba *hisi_hba = sha->lldd_ha; 5113 struct Scsi_Host *shost = hisi_hba->shost; 5114 struct device *dev = hisi_hba->dev; 5115 int rc; 5116 5117 wait_event(shost->host_wait, !scsi_host_in_recovery(shost)); 5118 dev_info(dev, "FLR prepare\n"); 5119 down(&hisi_hba->sem); 5120 set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 5121 hisi_sas_controller_reset_prepare(hisi_hba); 5122 5123 interrupt_disable_v3_hw(hisi_hba); 5124 rc = disable_host_v3_hw(hisi_hba); 5125 if (rc) 5126 dev_err(dev, "FLR: disable host failed rc=%d\n", rc); 5127 } 5128 5129 static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev) 5130 { 5131 struct sas_ha_struct *sha = pci_get_drvdata(pdev); 5132 struct hisi_hba *hisi_hba = sha->lldd_ha; 5133 struct Scsi_Host *shost = hisi_hba->shost; 5134 struct device *dev = hisi_hba->dev; 5135 int rc; 5136 5137 hisi_sas_init_mem(hisi_hba); 5138 5139 rc = hw_init_v3_hw(hisi_hba); 5140 if (rc) { 5141 dev_err(dev, "FLR: hw init failed rc=%d\n", rc); 5142 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 5143 scsi_unblock_requests(shost); 5144 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 5145 
up(&hisi_hba->sem); 5146 return; 5147 } 5148 5149 hisi_sas_controller_reset_done(hisi_hba); 5150 dev_info(dev, "FLR done\n"); 5151 } 5152 5153 enum { 5154 /* instances of the controller */ 5155 hip08, 5156 }; 5157 5158 static void enable_host_v3_hw(struct hisi_hba *hisi_hba) 5159 { 5160 u32 reg_val; 5161 5162 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 5163 (u32)((1ULL << hisi_hba->queue_count) - 1)); 5164 5165 phys_init_v3_hw(hisi_hba); 5166 reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + 5167 AM_CTRL_GLOBAL); 5168 reg_val &= ~AM_CTRL_SHUTDOWN_REQ_MSK; 5169 hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + 5170 AM_CTRL_GLOBAL, reg_val); 5171 } 5172 5173 static int _suspend_v3_hw(struct device *device) 5174 { 5175 struct pci_dev *pdev = to_pci_dev(device); 5176 struct sas_ha_struct *sha = pci_get_drvdata(pdev); 5177 struct hisi_hba *hisi_hba = sha->lldd_ha; 5178 struct device *dev = hisi_hba->dev; 5179 struct Scsi_Host *shost = hisi_hba->shost; 5180 int rc; 5181 5182 if (!pdev->pm_cap) { 5183 dev_err(dev, "PCI PM not supported\n"); 5184 return -ENODEV; 5185 } 5186 5187 if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) 5188 return -EPERM; 5189 5190 dev_warn(dev, "entering suspend state\n"); 5191 5192 scsi_block_requests(shost); 5193 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 5194 flush_workqueue(hisi_hba->wq); 5195 interrupt_disable_v3_hw(hisi_hba); 5196 5197 #ifdef CONFIG_PM 5198 if ((device->power.runtime_status == RPM_SUSPENDING) && 5199 atomic_read(&device->power.usage_count)) { 5200 dev_err(dev, "PM suspend: host status cannot be suspended\n"); 5201 rc = -EBUSY; 5202 goto err_out; 5203 } 5204 #endif 5205 5206 rc = disable_host_v3_hw(hisi_hba); 5207 if (rc) { 5208 dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc); 5209 goto err_out_recover_host; 5210 } 5211 5212 hisi_sas_init_mem(hisi_hba); 5213 5214 hisi_sas_release_tasks(hisi_hba); 5215 5216 sas_suspend_ha(sha); 5217 5218 dev_warn(dev, "end of suspending 
controller\n"); 5219 return 0; 5220 5221 err_out_recover_host: 5222 enable_host_v3_hw(hisi_hba); 5223 #ifdef CONFIG_PM 5224 err_out: 5225 #endif 5226 interrupt_enable_v3_hw(hisi_hba); 5227 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 5228 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 5229 scsi_unblock_requests(shost); 5230 return rc; 5231 } 5232 5233 static int _resume_v3_hw(struct device *device) 5234 { 5235 struct pci_dev *pdev = to_pci_dev(device); 5236 struct sas_ha_struct *sha = pci_get_drvdata(pdev); 5237 struct hisi_hba *hisi_hba = sha->lldd_ha; 5238 struct Scsi_Host *shost = hisi_hba->shost; 5239 struct device *dev = hisi_hba->dev; 5240 unsigned int rc; 5241 pci_power_t device_state = pdev->current_state; 5242 5243 dev_warn(dev, "resuming from operating state [D%d]\n", 5244 device_state); 5245 5246 scsi_unblock_requests(shost); 5247 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 5248 5249 sas_prep_resume_ha(sha); 5250 rc = hw_init_v3_hw(hisi_hba); 5251 if (rc) { 5252 scsi_remove_host(shost); 5253 return rc; 5254 } 5255 phys_init_v3_hw(hisi_hba); 5256 5257 /* 5258 * If a directly-attached disk is removed during suspend, a deadlock 5259 * may occur, as the PHYE_RESUME_TIMEOUT processing will require the 5260 * hisi_hba->device to be active, which can only happen when resume 5261 * completes. So don't wait for the HA event workqueue to drain upon 5262 * resume. 
5263 */ 5264 sas_resume_ha_no_sync(sha); 5265 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 5266 5267 dev_warn(dev, "end of resuming controller\n"); 5268 5269 return 0; 5270 } 5271 5272 static int __maybe_unused suspend_v3_hw(struct device *device) 5273 { 5274 struct pci_dev *pdev = to_pci_dev(device); 5275 struct sas_ha_struct *sha = pci_get_drvdata(pdev); 5276 struct hisi_hba *hisi_hba = sha->lldd_ha; 5277 int rc; 5278 5279 set_bit(HISI_SAS_PM_BIT, &hisi_hba->flags); 5280 5281 rc = _suspend_v3_hw(device); 5282 if (rc) 5283 clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags); 5284 5285 return rc; 5286 } 5287 5288 static int __maybe_unused resume_v3_hw(struct device *device) 5289 { 5290 struct pci_dev *pdev = to_pci_dev(device); 5291 struct sas_ha_struct *sha = pci_get_drvdata(pdev); 5292 struct hisi_hba *hisi_hba = sha->lldd_ha; 5293 int rc = _resume_v3_hw(device); 5294 5295 clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags); 5296 5297 return rc; 5298 } 5299 5300 static const struct pci_device_id sas_v3_pci_table[] = { 5301 { PCI_VDEVICE(HUAWEI, 0xa230), hip08 }, 5302 {} 5303 }; 5304 MODULE_DEVICE_TABLE(pci, sas_v3_pci_table); 5305 5306 static const struct pci_error_handlers hisi_sas_err_handler = { 5307 .reset_prepare = hisi_sas_reset_prepare_v3_hw, 5308 .reset_done = hisi_sas_reset_done_v3_hw, 5309 }; 5310 5311 static UNIVERSAL_DEV_PM_OPS(hisi_sas_v3_pm_ops, 5312 suspend_v3_hw, 5313 resume_v3_hw, 5314 NULL); 5315 5316 static struct pci_driver sas_v3_pci_driver = { 5317 .name = DRV_NAME, 5318 .id_table = sas_v3_pci_table, 5319 .probe = hisi_sas_v3_probe, 5320 .remove = hisi_sas_v3_remove, 5321 .err_handler = &hisi_sas_err_handler, 5322 .driver.pm = &hisi_sas_v3_pm_ops, 5323 }; 5324 5325 module_pci_driver(sas_v3_pci_driver); 5326 module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444); 5327 5328 MODULE_LICENSE("GPL"); 5329 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 5330 MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device"); 5331 
MODULE_ALIAS("pci:" DRV_NAME); 5332