1 /* 2 * Copyright (c) 2016 Linaro Ltd. 3 * Copyright (c) 2016 Hisilicon Limited. 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 */ 11 12 #include "hisi_sas.h" 13 #define DRV_NAME "hisi_sas_v2_hw" 14 15 /* global registers need init*/ 16 #define DLVRY_QUEUE_ENABLE 0x0 17 #define IOST_BASE_ADDR_LO 0x8 18 #define IOST_BASE_ADDR_HI 0xc 19 #define ITCT_BASE_ADDR_LO 0x10 20 #define ITCT_BASE_ADDR_HI 0x14 21 #define IO_BROKEN_MSG_ADDR_LO 0x18 22 #define IO_BROKEN_MSG_ADDR_HI 0x1c 23 #define PHY_CONTEXT 0x20 24 #define PHY_STATE 0x24 25 #define PHY_PORT_NUM_MA 0x28 26 #define PORT_STATE 0x2c 27 #define PORT_STATE_PHY8_PORT_NUM_OFF 16 28 #define PORT_STATE_PHY8_PORT_NUM_MSK (0xf << PORT_STATE_PHY8_PORT_NUM_OFF) 29 #define PORT_STATE_PHY8_CONN_RATE_OFF 20 30 #define PORT_STATE_PHY8_CONN_RATE_MSK (0xf << PORT_STATE_PHY8_CONN_RATE_OFF) 31 #define PHY_CONN_RATE 0x30 32 #define HGC_TRANS_TASK_CNT_LIMIT 0x38 33 #define AXI_AHB_CLK_CFG 0x3c 34 #define ITCT_CLR 0x44 35 #define ITCT_CLR_EN_OFF 16 36 #define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF) 37 #define ITCT_DEV_OFF 0 38 #define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF) 39 #define AXI_USER1 0x48 40 #define AXI_USER2 0x4c 41 #define IO_SATA_BROKEN_MSG_ADDR_LO 0x58 42 #define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c 43 #define SATA_INITI_D2H_STORE_ADDR_LO 0x60 44 #define SATA_INITI_D2H_STORE_ADDR_HI 0x64 45 #define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84 46 #define HGC_SAS_TXFAIL_RETRY_CTRL 0x88 47 #define HGC_GET_ITV_TIME 0x90 48 #define DEVICE_MSG_WORK_MODE 0x94 49 #define OPENA_WT_CONTI_TIME 0x9c 50 #define I_T_NEXUS_LOSS_TIME 0xa0 51 #define MAX_CON_TIME_LIMIT_TIME 0xa4 52 #define BUS_INACTIVE_LIMIT_TIME 0xa8 53 #define REJECT_TO_OPEN_LIMIT_TIME 0xac 54 #define CFG_AGING_TIME 0xbc 55 #define HGC_DFX_CFG2 0xc0 56 #define HGC_IOMB_PROC1_STATUS 0x104 57 #define CFG_1US_TIMER_TRSH 0xcc 58 #define HGC_LM_DFX_STATUS2 0x128 59 #define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0 60 #define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \ 61 HGC_LM_DFX_STATUS2_IOSTLIST_OFF) 62 #define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12 63 #define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \ 64 HGC_LM_DFX_STATUS2_ITCTLIST_OFF) 65 #define HGC_CQE_ECC_ADDR 0x13c 66 #define HGC_CQE_ECC_1B_ADDR_OFF 0 67 #define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF) 68 #define HGC_CQE_ECC_MB_ADDR_OFF 8 69 #define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF) 70 #define HGC_IOST_ECC_ADDR 0x140 71 #define HGC_IOST_ECC_1B_ADDR_OFF 0 72 #define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF) 73 #define HGC_IOST_ECC_MB_ADDR_OFF 16 74 #define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF) 75 #define HGC_DQE_ECC_ADDR 0x144 76 #define HGC_DQE_ECC_1B_ADDR_OFF 0 77 #define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF) 78 #define HGC_DQE_ECC_MB_ADDR_OFF 16 79 #define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF) 80 #define HGC_INVLD_DQE_INFO 0x148 81 #define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9 82 #define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF) 83 #define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18 84 #define HGC_ITCT_ECC_ADDR 0x150 85 #define HGC_ITCT_ECC_1B_ADDR_OFF 0 86 #define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \ 87 HGC_ITCT_ECC_1B_ADDR_OFF) 88 #define HGC_ITCT_ECC_MB_ADDR_OFF 16 89 #define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \ 90 HGC_ITCT_ECC_MB_ADDR_OFF) 91 #define HGC_AXI_FIFO_ERR_INFO 0x154 92 #define AXI_ERR_INFO_OFF 0 93 #define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF) 94 #define FIFO_ERR_INFO_OFF 8 95 #define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF) 96 #define INT_COAL_EN 0x19c 97 #define OQ_INT_COAL_TIME 0x1a0 98 #define OQ_INT_COAL_CNT 0x1a4 99 #define ENT_INT_COAL_TIME 0x1a8 100 #define ENT_INT_COAL_CNT 0x1ac 101 #define OQ_INT_SRC 0x1b0 102 #define OQ_INT_SRC_MSK 0x1b4 103 #define ENT_INT_SRC1 0x1b8 104 #define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0 105 #define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF) 106 #define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8 107 #define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF) 108 #define ENT_INT_SRC2 0x1bc 109 #define ENT_INT_SRC3 0x1c0 110 #define ENT_INT_SRC3_WP_DEPTH_OFF 8 111 #define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9 112 #define ENT_INT_SRC3_RP_DEPTH_OFF 10 113 #define ENT_INT_SRC3_AXI_OFF 11 114 #define ENT_INT_SRC3_FIFO_OFF 12 115 #define ENT_INT_SRC3_LM_OFF 14 116 #define ENT_INT_SRC3_ITC_INT_OFF 15 117 #define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF) 118 #define ENT_INT_SRC3_ABT_OFF 16 119 #define ENT_INT_SRC_MSK1 0x1c4 120 #define ENT_INT_SRC_MSK2 0x1c8 121 #define ENT_INT_SRC_MSK3 0x1cc 122 #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31 123 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF) 124 #define SAS_ECC_INTR 0x1e8 125 #define SAS_ECC_INTR_DQE_ECC_1B_OFF 0 126 #define SAS_ECC_INTR_DQE_ECC_MB_OFF 1 127 #define SAS_ECC_INTR_IOST_ECC_1B_OFF 2 128 #define SAS_ECC_INTR_IOST_ECC_MB_OFF 3 129 #define SAS_ECC_INTR_ITCT_ECC_MB_OFF 4 130 #define SAS_ECC_INTR_ITCT_ECC_1B_OFF 5 131 #define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 6 132 #define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 7 133 #define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 8 134 #define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 9 135 #define SAS_ECC_INTR_CQE_ECC_1B_OFF 10 136 #define SAS_ECC_INTR_CQE_ECC_MB_OFF 11 137 #define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 12 138 #define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 13 139 #define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 14 140 #define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 15 141 #define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 16 142 #define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 17 143 #define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 18 144 #define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19 145 #define SAS_ECC_INTR_MSK 0x1ec 146 #define HGC_ERR_STAT_EN 0x238 147 #define DLVRY_Q_0_BASE_ADDR_LO 0x260 148 #define DLVRY_Q_0_BASE_ADDR_HI 0x264 149 #define DLVRY_Q_0_DEPTH 0x268 150 #define DLVRY_Q_0_WR_PTR 0x26c 151 #define DLVRY_Q_0_RD_PTR 0x270 152 #define HYPER_STREAM_ID_EN_CFG 0xc80 153 #define OQ0_INT_SRC_MSK 0xc90 154 #define COMPL_Q_0_BASE_ADDR_LO 0x4e0 155 #define COMPL_Q_0_BASE_ADDR_HI 0x4e4 156 #define COMPL_Q_0_DEPTH 0x4e8 157 #define COMPL_Q_0_WR_PTR 0x4ec 158 #define COMPL_Q_0_RD_PTR 0x4f0 159 #define HGC_RXM_DFX_STATUS14 0xae8 160 #define HGC_RXM_DFX_STATUS14_MEM0_OFF 0 161 #define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \ 162 HGC_RXM_DFX_STATUS14_MEM0_OFF) 163 #define HGC_RXM_DFX_STATUS14_MEM1_OFF 9 164 #define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \ 165 HGC_RXM_DFX_STATUS14_MEM1_OFF) 166 #define HGC_RXM_DFX_STATUS14_MEM2_OFF 18 167 #define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \ 168 HGC_RXM_DFX_STATUS14_MEM2_OFF) 169 #define HGC_RXM_DFX_STATUS15 0xaec 170 #define HGC_RXM_DFX_STATUS15_MEM3_OFF 0 171 #define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \ 172 HGC_RXM_DFX_STATUS15_MEM3_OFF) 173 /* phy registers need init */ 174 #define PORT_BASE (0x2000) 175 176 #define PHY_CFG (PORT_BASE + 0x0) 177 #define HARD_PHY_LINKRATE (PORT_BASE + 0x4) 178 #define PHY_CFG_ENA_OFF 0 179 #define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF) 180 #define PHY_CFG_DC_OPT_OFF 2 181 #define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF) 182 #define PROG_PHY_LINK_RATE (PORT_BASE + 0x8) 183 #define PROG_PHY_LINK_RATE_MAX_OFF 0 184 #define PROG_PHY_LINK_RATE_MAX_MSK (0xff << PROG_PHY_LINK_RATE_MAX_OFF) 185 #define PHY_CTRL (PORT_BASE + 0x14) 186 #define PHY_CTRL_RESET_OFF 0 187 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) 188 #define SAS_PHY_CTRL (PORT_BASE + 0x20) 189 #define SL_CFG (PORT_BASE + 0x84) 190 #define PHY_PCN (PORT_BASE + 0x44) 191 #define SL_TOUT_CFG (PORT_BASE + 0x8c) 192 #define SL_CONTROL (PORT_BASE + 0x94) 193 #define SL_CONTROL_NOTIFY_EN_OFF 0 194 #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) 195 #define SL_CONTROL_CTA_OFF 17 196 #define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF) 197 #define RX_PRIMS_STATUS (PORT_BASE + 0x98) 198 #define RX_BCAST_CHG_OFF 1 199 #define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF) 200 #define TX_ID_DWORD0 (PORT_BASE + 0x9c) 201 #define TX_ID_DWORD1 (PORT_BASE + 0xa0) 202 #define TX_ID_DWORD2 (PORT_BASE + 0xa4) 203 #define TX_ID_DWORD3 (PORT_BASE + 0xa8) 204 #define TX_ID_DWORD4 (PORT_BASE + 0xaC) 205 #define TX_ID_DWORD5 (PORT_BASE + 0xb0) 206 #define TX_ID_DWORD6 (PORT_BASE + 0xb4) 207 #define TXID_AUTO (PORT_BASE + 0xb8) 208 #define TXID_AUTO_CT3_OFF 1 209 #define TXID_AUTO_CT3_MSK (0x1 << TXID_AUTO_CT3_OFF) 210 #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) 211 #define RX_IDAF_DWORD1 (PORT_BASE + 0xc8) 212 #define RX_IDAF_DWORD2 (PORT_BASE + 0xcc) 213 #define RX_IDAF_DWORD3 (PORT_BASE + 0xd0) 214 #define RX_IDAF_DWORD4 (PORT_BASE + 0xd4) 215 #define RX_IDAF_DWORD5 (PORT_BASE + 0xd8) 216 #define RX_IDAF_DWORD6 (PORT_BASE + 0xdc) 217 #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) 218 #define DONE_RECEIVED_TIME (PORT_BASE + 0x11c) 219 #define CHL_INT0 (PORT_BASE + 0x1b4) 220 #define CHL_INT0_HOTPLUG_TOUT_OFF 0 221 #define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF) 222 #define CHL_INT0_SL_RX_BCST_ACK_OFF 1 223 #define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF) 224 #define CHL_INT0_SL_PHY_ENABLE_OFF 2 225 #define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF) 226 #define CHL_INT0_NOT_RDY_OFF 4 227 #define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF) 228 #define CHL_INT0_PHY_RDY_OFF 5 229 #define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF) 230 #define CHL_INT1 (PORT_BASE + 0x1b8) 231 #define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15 232 #define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF) 233 #define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17 234 #define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF) 235 #define CHL_INT2 (PORT_BASE + 0x1bc) 236 #define CHL_INT0_MSK (PORT_BASE + 0x1c0) 237 #define CHL_INT1_MSK (PORT_BASE + 0x1c4) 238 #define CHL_INT2_MSK (PORT_BASE + 0x1c8) 239 #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) 240 #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) 241 #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) 242 #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) 243 #define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc) 244 #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) 245 #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) 246 #define DMA_TX_STATUS (PORT_BASE + 0x2d0) 247 #define DMA_TX_STATUS_BUSY_OFF 0 248 #define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) 249 #define DMA_RX_STATUS (PORT_BASE + 0x2e8) 250 #define DMA_RX_STATUS_BUSY_OFF 0 251 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) 252 253 #define AXI_CFG (0x5100) 254 #define AM_CFG_MAX_TRANS (0x5010) 255 #define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014) 256 257 /* HW dma structures */ 258 /* Delivery queue header */ 259 /* dw0 */ 260 #define CMD_HDR_ABORT_FLAG_OFF 0 261 #define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF) 262 #define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2 263 #define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF) 264 #define CMD_HDR_RESP_REPORT_OFF 5 265 #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) 266 #define CMD_HDR_TLR_CTRL_OFF 6 267 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) 268 #define CMD_HDR_PORT_OFF 18 269 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) 270 #define CMD_HDR_PRIORITY_OFF 27 271 #define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF) 272 #define CMD_HDR_CMD_OFF 29 273 #define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF) 274 /* dw1 */ 275 #define CMD_HDR_DIR_OFF 5 276 #define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF) 277 #define CMD_HDR_RESET_OFF 7 278 #define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF) 279 #define CMD_HDR_VDTL_OFF 10 280 #define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF) 281 #define CMD_HDR_FRAME_TYPE_OFF 11 282 #define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF) 283 #define CMD_HDR_DEV_ID_OFF 16 284 #define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF) 285 /* dw2 */ 286 #define CMD_HDR_CFL_OFF 0 287 #define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF) 288 #define CMD_HDR_NCQ_TAG_OFF 10 289 #define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF) 290 #define CMD_HDR_MRFL_OFF 15 291 #define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF) 292 #define CMD_HDR_SG_MOD_OFF 24 293 #define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF) 294 #define CMD_HDR_FIRST_BURST_OFF 26 295 #define CMD_HDR_FIRST_BURST_MSK (0x1 << CMD_HDR_SG_MOD_OFF) 296 /* dw3 */ 297 #define CMD_HDR_IPTT_OFF 0 298 #define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF) 299 /* dw6 */ 300 #define CMD_HDR_DIF_SGL_LEN_OFF 0 301 #define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF) 302 #define CMD_HDR_DATA_SGL_LEN_OFF 16 303 #define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF) 304 #define CMD_HDR_ABORT_IPTT_OFF 16 305 #define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF) 306 307 /* Completion header */ 308 /* dw0 */ 309 #define CMPLT_HDR_RSPNS_XFRD_OFF 10 310 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) 311 #define CMPLT_HDR_ERX_OFF 12 312 #define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF) 313 #define CMPLT_HDR_ABORT_STAT_OFF 13 314 #define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF) 315 /* abort_stat */ 316 #define STAT_IO_NOT_VALID 0x1 317 #define STAT_IO_NO_DEVICE 0x2 318 #define STAT_IO_COMPLETE 0x3 319 #define STAT_IO_ABORTED 0x4 320 /* dw1 */ 321 #define CMPLT_HDR_IPTT_OFF 0 322 #define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) 323 #define CMPLT_HDR_DEV_ID_OFF 16 324 #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF) 325 326 /* ITCT header */ 327 /* qw0 */ 328 #define ITCT_HDR_DEV_TYPE_OFF 0 329 #define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF) 330 #define ITCT_HDR_VALID_OFF 2 331 #define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF) 332 #define ITCT_HDR_MCR_OFF 5 333 #define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF) 334 #define ITCT_HDR_VLN_OFF 9 335 #define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF) 336 #define ITCT_HDR_PORT_ID_OFF 28 337 #define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF) 338 /* qw2 */ 339 #define ITCT_HDR_INLT_OFF 0 340 #define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF) 341 #define ITCT_HDR_BITLT_OFF 16 342 #define ITCT_HDR_BITLT_MSK (0xffffULL << ITCT_HDR_BITLT_OFF) 343 #define ITCT_HDR_MCTLT_OFF 32 344 #define ITCT_HDR_MCTLT_MSK (0xffffULL << ITCT_HDR_MCTLT_OFF) 345 #define ITCT_HDR_RTOLT_OFF 48 346 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) 347 348 #define HISI_SAS_FATAL_INT_NR 2 349 350 struct hisi_sas_complete_v2_hdr { 351 __le32 dw0; 352 __le32 dw1; 353 __le32 act; 354 __le32 dw3; 355 }; 356 357 struct hisi_sas_err_record_v2 { 358 /* dw0 */ 359 __le32 trans_tx_fail_type; 360 361 /* dw1 */ 362 __le32 trans_rx_fail_type; 363 364 /* dw2 */ 365 __le16 dma_tx_err_type; 366 __le16 sipc_rx_err_type; 367 368 /* dw3 */ 369 __le32 dma_rx_err_type; 370 }; 371 372 enum { 373 HISI_SAS_PHY_PHY_UPDOWN, 374 HISI_SAS_PHY_CHNL_INT, 375 HISI_SAS_PHY_INT_NR 376 }; 377 378 enum { 379 TRANS_TX_FAIL_BASE = 0x0, /* dw0 */ 380 TRANS_RX_FAIL_BASE = 0x100, /* dw1 */ 381 DMA_TX_ERR_BASE = 0x200, /* dw2 bit 15-0 */ 382 SIPC_RX_ERR_BASE = 0x300, /* dw2 bit 31-16*/ 383 DMA_RX_ERR_BASE = 0x400, /* dw3 */ 384 385 /* trans tx*/ 386 TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS = TRANS_TX_FAIL_BASE, /* 0x0 */ 387 TRANS_TX_ERR_PHY_NOT_ENABLE, /* 0x1 */ 388 TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, /* 0x2 */ 389 TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, /* 0x3 */ 390 TRANS_TX_OPEN_CNX_ERR_BY_OTHER, /* 0x4 */ 391 RESERVED0, /* 0x5 */ 392 TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, /* 0x6 */ 393 TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, /* 0x7 */ 394 TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, /* 0x8 */ 395 TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, /* 0x9 */ 396 TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, /* 0xa */ 397 TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, /* 0xb */ 398 TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, /* 0xc */ 399 TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, /* 0xd */ 400 TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, /* 0xe */ 401 TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, /* 0xf */ 402 TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, /* 0x10 */ 403 TRANS_TX_ERR_FRAME_TXED, /* 0x11 */ 404 TRANS_TX_ERR_WITH_BREAK_TIMEOUT, /* 0x12 */ 405 TRANS_TX_ERR_WITH_BREAK_REQUEST, /* 0x13 */ 406 TRANS_TX_ERR_WITH_BREAK_RECEVIED, /* 0x14 */ 407 TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, /* 0x15 */ 408 TRANS_TX_ERR_WITH_CLOSE_NORMAL, /* 0x16 for ssp*/ 409 TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, /* 0x17 */ 410 TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x18 */ 411 TRANS_TX_ERR_WITH_CLOSE_COMINIT, /* 0x19 */ 412 TRANS_TX_ERR_WITH_NAK_RECEVIED, /* 0x1a for ssp*/ 413 TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, /* 0x1b for ssp*/ 414 /*IO_TX_ERR_WITH_R_ERR_RECEVIED, [> 0x1b for sata/stp<] */ 415 TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, /* 0x1c for ssp */ 416 /*IO_RX_ERR_WITH_SATA_DEVICE_LOST 0x1c for sata/stp */ 417 TRANS_TX_ERR_WITH_IPTT_CONFLICT, /* 0x1d for ssp/smp */ 418 TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, /* 0x1e */ 419 /*IO_TX_ERR_WITH_SYNC_RXD, [> 0x1e <] for sata/stp */ 420 TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, /* 0x1f for sata/stp */ 421 422 /* trans rx */ 423 TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x100 */ 424 TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x101 for sata/stp */ 425 TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x102 for ssp/smp */ 426 /*IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, [> 0x102 <] for sata/stp */ 427 TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x103 for sata/stp */ 428 TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x104 for sata/stp */ 429 TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x105 for smp */ 430 /*IO_ERR_WITH_RXFIS_TX SYNCP, [> 0x105 <] for sata/stp */ 431 TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x106 for sata/stp*/ 432 TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x107 */ 433 TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x108 */ 434 TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x109 */ 435 TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x10a */ 436 RESERVED1, /* 0x10b */ 437 TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x10c */ 438 TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x10d */ 439 TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x10e */ 440 TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x10f */ 441 TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x110 for ssp/smp */ 442 TRANS_RX_ERR_WITH_BAD_HASH, /* 0x111 for ssp */ 443 /*IO_RX_ERR_WITH_FIS_TOO_SHORT, [> 0x111 <] for sata/stp */ 444 TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x112 for ssp*/ 445 /*IO_RX_ERR_WITH_FIS_TOO_LONG, [> 0x112 <] for sata/stp */ 446 TRANS_RX_SSP_FRM_LEN_ERR, /* 0x113 for ssp */ 447 /*IO_RX_ERR_WITH_SATA_DEVICE_LOST, [> 0x113 <] for sata */ 448 RESERVED2, /* 0x114 */ 449 RESERVED3, /* 0x115 */ 450 RESERVED4, /* 0x116 */ 451 RESERVED5, /* 0x117 */ 452 TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x118 */ 453 TRANS_RX_SMP_FRM_LEN_ERR, /* 0x119 */ 454 TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x11a */ 455 RESERVED6, /* 0x11b */ 456 RESERVED7, /* 0x11c */ 457 RESERVED8, /* 0x11d */ 458 RESERVED9, /* 0x11e */ 459 TRANS_RX_R_ERR, /* 0x11f */ 460 461 /* dma tx */ 462 DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x200 */ 463 DMA_TX_DIF_APP_ERR, /* 0x201 */ 464 DMA_TX_DIF_RPP_ERR, /* 0x202 */ 465 DMA_TX_DATA_SGL_OVERFLOW, /* 0x203 */ 466 DMA_TX_DIF_SGL_OVERFLOW, /* 0x204 */ 467 DMA_TX_UNEXP_XFER_ERR, /* 0x205 */ 468 DMA_TX_UNEXP_RETRANS_ERR, /* 0x206 */ 469 DMA_TX_XFER_LEN_OVERFLOW, /* 0x207 */ 470 DMA_TX_XFER_OFFSET_ERR, /* 0x208 */ 471 DMA_TX_RAM_ECC_ERR, /* 0x209 */ 472 DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x20a */ 473 474 /* sipc rx */ 475 SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x300 */ 476 SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x301 */ 477 SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x302 */ 478 SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x303 */ 479 SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x304 */ 480 SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x305 */ 481 SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x306 */ 482 SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x307 */ 483 SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x308 */ 484 SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x309 */ 485 SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x30a */ 486 487 /* dma rx */ 488 DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x400 */ 489 DMA_RX_DIF_APP_ERR, /* 0x401 */ 490 DMA_RX_DIF_RPP_ERR, /* 0x402 */ 491 DMA_RX_DATA_SGL_OVERFLOW, /* 0x403 */ 492 DMA_RX_DIF_SGL_OVERFLOW, /* 0x404 */ 493 DMA_RX_DATA_LEN_OVERFLOW, /* 0x405 */ 494 DMA_RX_DATA_LEN_UNDERFLOW, /* 0x406 */ 495 DMA_RX_DATA_OFFSET_ERR, /* 0x407 */ 496 RESERVED10, /* 0x408 */ 497 DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x409 */ 498 DMA_RX_RESP_BUF_OVERFLOW, /* 0x40a */ 499 DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x40b */ 500 DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x40c */ 501 DMA_RX_UNEXP_RDFRAME_ERR, /* 0x40d */ 502 DMA_RX_PIO_DATA_LEN_ERR, /* 0x40e */ 503 DMA_RX_RDSETUP_STATUS_ERR, /* 0x40f */ 504 DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x410 */ 505 DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x411 */ 506 DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x412 */ 507 DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x413 */ 508 DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x414 */ 509 DMA_RX_RDSETUP_OFFSET_ERR, /* 0x415 */ 510 DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x416 */ 511 DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x417 */ 512 DMA_RX_RAM_ECC_ERR, /* 0x418 */ 513 DMA_RX_UNKNOWN_FRM_ERR, /* 0x419 */ 514 }; 515 516 #define HISI_SAS_COMMAND_ENTRIES_V2_HW 4096 517 518 #define DIR_NO_DATA 0 519 #define DIR_TO_INI 1 520 #define DIR_TO_DEVICE 2 521 #define DIR_RESERVED 3 522 523 #define SATA_PROTOCOL_NONDATA 0x1 524 #define SATA_PROTOCOL_PIO 0x2 525 #define SATA_PROTOCOL_DMA 0x4 526 #define SATA_PROTOCOL_FPDMA 0x8 527 #define SATA_PROTOCOL_ATAPI 0x10 528 529 static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) 530 { 531 void __iomem *regs = hisi_hba->regs + off; 532 533 return readl(regs); 534 } 535 536 static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off) 537 { 538 void __iomem *regs = hisi_hba->regs + off; 539 540 return readl_relaxed(regs); 541 } 542 543 static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) 544 { 545 void __iomem *regs = hisi_hba->regs + off; 546 547 writel(val, regs); 548 } 549 550 static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, 551 u32 off, u32 val) 552 { 553 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; 554 555 writel(val, regs); 556 } 557 558 static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, 559 int phy_no, u32 off) 560 { 561 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; 562 563 return readl(regs); 564 } 565 566 /* This function needs to be protected from pre-emption. */ 567 static int 568 slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx, 569 struct domain_device *device) 570 { 571 unsigned int index = 0; 572 void *bitmap = hisi_hba->slot_index_tags; 573 int sata_dev = dev_is_sata(device); 574 575 while (1) { 576 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, 577 index); 578 if (index >= hisi_hba->slot_index_count) 579 return -SAS_QUEUE_FULL; 580 /* 581 * SAS IPTT bit0 should be 1 582 */ 583 if (sata_dev || (index & 1)) 584 break; 585 index++; 586 } 587 588 set_bit(index, bitmap); 589 *slot_idx = index; 590 return 0; 591 } 592 593 static struct 594 hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) 595 { 596 struct hisi_hba *hisi_hba = device->port->ha->lldd_ha; 597 struct hisi_sas_device *sas_dev = NULL; 598 int i, sata_dev = dev_is_sata(device); 599 600 spin_lock(&hisi_hba->lock); 601 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 602 /* 603 * SATA device id bit0 should be 0 604 */ 605 if (sata_dev && (i & 1)) 606 continue; 607 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 608 hisi_hba->devices[i].device_id = i; 609 sas_dev = &hisi_hba->devices[i]; 610 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 611 sas_dev->dev_type = device->dev_type; 612 sas_dev->hisi_hba = hisi_hba; 613 sas_dev->sas_device = device; 614 break; 615 } 616 } 617 spin_unlock(&hisi_hba->lock); 618 619 return sas_dev; 620 } 621 622 static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 623 { 624 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); 625 626 cfg &= ~PHY_CFG_DC_OPT_MSK; 627 cfg |= 1 << PHY_CFG_DC_OPT_OFF; 628 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); 629 } 630 631 static void config_id_frame_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 632 { 633 struct sas_identify_frame identify_frame; 634 u32 *identify_buffer; 635 636 memset(&identify_frame, 0, sizeof(identify_frame)); 637 identify_frame.dev_type = SAS_END_DEVICE; 638 identify_frame.frame_type = 0; 639 identify_frame._un1 = 1; 640 identify_frame.initiator_bits = SAS_PROTOCOL_ALL; 641 identify_frame.target_bits = SAS_PROTOCOL_NONE; 642 memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); 643 memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); 644 identify_frame.phy_id = phy_no; 645 identify_buffer = (u32 *)(&identify_frame); 646 647 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, 648 __swab32(identify_buffer[0])); 649 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, 650 __swab32(identify_buffer[1])); 651 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, 652 __swab32(identify_buffer[2])); 653 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, 654 __swab32(identify_buffer[3])); 655 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, 656 __swab32(identify_buffer[4])); 657 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, 658 __swab32(identify_buffer[5])); 659 } 660 661 static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, 662 struct hisi_sas_device *sas_dev) 663 { 664 struct domain_device *device = sas_dev->sas_device; 665 struct device *dev = &hisi_hba->pdev->dev; 666 u64 qw0, device_id = sas_dev->device_id; 667 struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; 668 struct domain_device *parent_dev = device->parent; 669 struct hisi_sas_port *port = device->port->lldd_port; 670 671 memset(itct, 0, sizeof(*itct)); 672 673 /* qw0 */ 674 qw0 = 0; 675 switch (sas_dev->dev_type) { 676 case SAS_END_DEVICE: 677 case SAS_EDGE_EXPANDER_DEVICE: 678 case SAS_FANOUT_EXPANDER_DEVICE: 679 qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF; 680 break; 681 case SAS_SATA_DEV: 682 case SAS_SATA_PENDING: 683 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) 684 qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF; 685 else 686 qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF; 687 break; 688 default: 689 dev_warn(dev, "setup itct: unsupported dev type (%d)\n", 690 sas_dev->dev_type); 691 } 692 693 qw0 |= ((1 << ITCT_HDR_VALID_OFF) | 694 (device->linkrate << ITCT_HDR_MCR_OFF) | 695 (1 << ITCT_HDR_VLN_OFF) | 696 (port->id << ITCT_HDR_PORT_ID_OFF)); 697 itct->qw0 = cpu_to_le64(qw0); 698 699 /* qw1 */ 700 memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE); 701 itct->sas_addr = __swab64(itct->sas_addr); 702 703 /* qw2 */ 704 if (!dev_is_sata(device)) 705 itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) | 706 (0x1ULL << ITCT_HDR_BITLT_OFF) | 707 (0x32ULL << ITCT_HDR_MCTLT_OFF) | 708 (0x1ULL << ITCT_HDR_RTOLT_OFF)); 709 } 710 711 static void free_device_v2_hw(struct hisi_hba *hisi_hba, 712 struct hisi_sas_device *sas_dev) 713 { 714 u64 qw0, dev_id = sas_dev->device_id; 715 struct device *dev = &hisi_hba->pdev->dev; 716 struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; 717 u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 718 int i; 719 720 /* clear the itct interrupt state */ 721 if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) 722 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 723 ENT_INT_SRC3_ITC_INT_MSK); 724 725 /* clear the itct int*/ 726 for (i = 0; i < 2; i++) { 727 /* clear the itct table*/ 728 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); 729 reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); 730 hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); 731 732 udelay(10); 733 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 734 if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) { 735 dev_dbg(dev, "got clear ITCT done interrupt\n"); 736 737 /* invalid the itct state*/ 738 qw0 = cpu_to_le64(itct->qw0); 739 qw0 &= ~(1 << ITCT_HDR_VALID_OFF); 740 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 741 ENT_INT_SRC3_ITC_INT_MSK); 742 743 /* clear the itct */ 744 hisi_sas_write32(hisi_hba, ITCT_CLR, 0); 745 dev_dbg(dev, "clear ITCT ok\n"); 746 break; 747 } 748 } 749 } 750 751 static int reset_hw_v2_hw(struct hisi_hba *hisi_hba) 752 { 753 int i, reset_val; 754 u32 val; 755 unsigned long end_time; 756 struct device *dev = &hisi_hba->pdev->dev; 757 758 /* The mask needs to be set depending on the number of phys */ 759 if (hisi_hba->n_phy == 9) 760 reset_val = 0x1fffff; 761 else 762 reset_val = 0x7ffff; 763 764 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); 765 766 /* Disable all of the PHYs */ 767 for (i = 0; i < hisi_hba->n_phy; i++) { 768 u32 phy_cfg = hisi_sas_phy_read32(hisi_hba, i, PHY_CFG); 769 770 phy_cfg &= ~PHY_CTRL_RESET_MSK; 771 hisi_sas_phy_write32(hisi_hba, i, PHY_CFG, phy_cfg); 772 } 773 udelay(50); 774 775 /* Ensure DMA tx & rx idle */ 776 for (i = 0; i < hisi_hba->n_phy; i++) { 777 u32 dma_tx_status, dma_rx_status; 778 779 end_time = jiffies + msecs_to_jiffies(1000); 780 781 while (1) { 782 dma_tx_status = hisi_sas_phy_read32(hisi_hba, i, 783 DMA_TX_STATUS); 784 dma_rx_status = hisi_sas_phy_read32(hisi_hba, i, 785 DMA_RX_STATUS); 786 787 if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) && 788 !(dma_rx_status & DMA_RX_STATUS_BUSY_MSK)) 789 break; 790 791 msleep(20); 792 if (time_after(jiffies, end_time)) 793 return -EIO; 794 } 795 } 796 797 /* Ensure axi bus idle */ 798 end_time = jiffies + msecs_to_jiffies(1000); 799 while (1) { 800 u32 axi_status = 801 hisi_sas_read32(hisi_hba, AXI_CFG); 802 803 if (axi_status == 0) 804 break; 805 806 msleep(20); 807 if (time_after(jiffies, end_time)) 808 return -EIO; 809 } 810 811 if (ACPI_HANDLE(dev)) { 812 acpi_status s; 813 814 s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL); 815 if (ACPI_FAILURE(s)) { 816 dev_err(dev, "Reset failed\n"); 817 return -EIO; 818 } 819 } else if (hisi_hba->ctrl) { 820 /* reset and disable clock*/ 821 regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg, 822 reset_val); 823 regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4, 824 reset_val); 825 msleep(1); 826 regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); 827 if (reset_val != (val & reset_val)) { 828 dev_err(dev, "SAS reset fail.\n"); 829 return -EIO; 830 } 831 832 /* De-reset and enable clock*/ 833 regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4, 834 reset_val); 835 regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg, 836 reset_val); 837 msleep(1); 838 regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, 839 &val); 840 if (val & reset_val) { 841 dev_err(dev, "SAS de-reset fail.\n"); 842 return -EIO; 843 } 844 } else 845 dev_warn(dev, "no reset method\n"); 846 847 return 0; 848 } 849 850 static void init_reg_v2_hw(struct hisi_hba *hisi_hba) 851 { 852 struct device *dev = &hisi_hba->pdev->dev; 853 int i; 854 855 /* Global registers init */ 856 857 /* Deal with am-max-transmissions quirk */ 858 if (device_property_present(dev, "hip06-sas-v2-quirk-amt")) { 859 hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020); 860 hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS, 861 0x2020); 862 } /* Else, use defaults -> do nothing */ 863 864 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 865 (u32)((1ULL << hisi_hba->queue_count) - 1)); 866 hisi_sas_write32(hisi_hba, AXI_USER1, 0xc0000000); 867 hisi_sas_write32(hisi_hba, AXI_USER2, 0x10000); 868 hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); 869 hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF); 870 hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1); 871 hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4); 872 hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x32); 873 hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1); 874 hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); 875 hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1); 876 hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1); 877 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); 878 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); 879 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); 880 hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1); 881 hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1); 882 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0x0); 883 hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); 884 hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); 885 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); 886 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe); 887 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe); 888 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe); 889 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30); 890 for (i = 0; i < hisi_hba->queue_count; i++) 891 hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0); 892 893 hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1); 894 hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); 895 896 for (i = 0; i < hisi_hba->n_phy; i++) { 897 hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855); 898 hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908); 899 hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d); 900 hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0); 901 hisi_sas_phy_write32(hisi_hba, i, TXID_AUTO, 0x2); 902 hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10); 903 hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); 904 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); 905 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff); 906 hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); 907 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); 908 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff); 909 hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x23f801fc); 910 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); 911 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); 912 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); 913 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0); 914 hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); 915 hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0); 916 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0); 917 if (hisi_hba->refclk_frequency_mhz == 66) 918 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694); 919 /* else, do nothing -> leave it how you found it */ 920 } 921 922 for (i = 0; i < hisi_hba->queue_count; i++) { 923 /* Delivery queue */ 924 hisi_sas_write32(hisi_hba, 925 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14), 926 upper_32_bits(hisi_hba->cmd_hdr_dma[i])); 927 928 hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14), 929 lower_32_bits(hisi_hba->cmd_hdr_dma[i])); 930 931 hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14), 932 HISI_SAS_QUEUE_SLOTS); 933 934 /* Completion queue */ 935 hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14), 936 upper_32_bits(hisi_hba->complete_hdr_dma[i])); 937 938 hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14), 939 lower_32_bits(hisi_hba->complete_hdr_dma[i])); 940 941 hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14), 942 HISI_SAS_QUEUE_SLOTS); 943 } 944 945 /* itct */ 946 hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO, 947 lower_32_bits(hisi_hba->itct_dma)); 948 949 hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI, 950 upper_32_bits(hisi_hba->itct_dma)); 951 952 /* iost */ 953 hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO, 954 lower_32_bits(hisi_hba->iost_dma)); 955 956 hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI, 957 upper_32_bits(hisi_hba->iost_dma)); 958 959 /* breakpoint */ 960 hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO, 961 lower_32_bits(hisi_hba->breakpoint_dma)); 962 963 hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI, 964 upper_32_bits(hisi_hba->breakpoint_dma)); 965 966 /* SATA broken msg */ 967 hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO, 968 lower_32_bits(hisi_hba->sata_breakpoint_dma)); 969 970 hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI, 971 upper_32_bits(hisi_hba->sata_breakpoint_dma)); 972 973 /* SATA initial fis */ 974 hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO, 975 lower_32_bits(hisi_hba->initial_fis_dma)); 976 977 hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI, 978 upper_32_bits(hisi_hba->initial_fis_dma)); 979 } 980 981 static int hw_init_v2_hw(struct hisi_hba *hisi_hba) 982 { 983 struct device *dev = &hisi_hba->pdev->dev; 984 int rc; 985 986 rc = reset_hw_v2_hw(hisi_hba); 987 if (rc) { 988 dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc); 989 return rc; 990 } 991 992 msleep(100); 993 init_reg_v2_hw(hisi_hba); 994 995 return 0; 996 } 997 998 static void enable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 999 { 1000 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); 1001 1002 cfg |= PHY_CFG_ENA_MSK; 1003 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); 1004 } 1005 1006 static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1007 { 1008 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); 1009 1010 cfg &= ~PHY_CFG_ENA_MSK; 1011 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); 1012 } 1013 1014 static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1015 { 1016 config_id_frame_v2_hw(hisi_hba, phy_no); 1017 config_phy_opt_mode_v2_hw(hisi_hba, phy_no); 1018 enable_phy_v2_hw(hisi_hba, phy_no); 1019 } 1020 1021 static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1022 { 1023 disable_phy_v2_hw(hisi_hba, phy_no); 1024 } 1025 1026 static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1027 { 1028 stop_phy_v2_hw(hisi_hba, phy_no); 1029 msleep(100); 1030 start_phy_v2_hw(hisi_hba, phy_no); 1031 } 1032 1033 static void start_phys_v2_hw(unsigned long data) 1034 { 1035 struct hisi_hba *hisi_hba = (struct hisi_hba *)data; 1036 int i; 1037 1038 for (i = 0; i < hisi_hba->n_phy; i++) 1039 start_phy_v2_hw(hisi_hba, i); 1040 } 1041 1042 static void phys_init_v2_hw(struct hisi_hba *hisi_hba) 1043 { 1044 struct timer_list *timer = &hisi_hba->timer; 1045 1046 setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba); 1047 mod_timer(timer, jiffies + HZ); 1048 } 1049 1050 static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1051 { 1052 u32 sl_control; 1053 1054 sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); 1055 sl_control |= SL_CONTROL_NOTIFY_EN_MSK; 1056 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); 1057 msleep(1); 1058 sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); 1059 sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK; 1060 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); 1061 } 1062 1063 static enum sas_linkrate phy_get_max_linkrate_v2_hw(void) 1064 { 1065 return SAS_LINK_RATE_12_0_GBPS; 1066 } 1067 1068 static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no, 1069 struct sas_phy_linkrates *r) 1070 { 1071 u32 prog_phy_link_rate = 1072 hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); 1073 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1074 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1075 int i; 1076 enum sas_linkrate min, max; 1077 u32 rate_mask = 0; 1078 1079 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1080 max = sas_phy->phy->maximum_linkrate; 1081 min = r->minimum_linkrate; 1082 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1083 max = r->maximum_linkrate; 1084 min = sas_phy->phy->minimum_linkrate; 1085 } else 1086 return; 1087 1088 sas_phy->phy->maximum_linkrate = max; 1089 sas_phy->phy->minimum_linkrate = min; 1090 1091 min -= SAS_LINK_RATE_1_5_GBPS; 1092 max -= SAS_LINK_RATE_1_5_GBPS; 1093 1094 for (i = 0; i <= max; i++) 1095 rate_mask |= 1 << (i * 2); 1096 1097 prog_phy_link_rate &= ~0xff; 1098 prog_phy_link_rate |= rate_mask; 1099 1100 hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, 1101 prog_phy_link_rate); 1102 1103 phy_hard_reset_v2_hw(hisi_hba, phy_no); 1104 } 1105 1106 static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id) 1107 { 1108 int i, bitmap = 0; 1109 u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); 1110 u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); 1111 1112 for (i = 0; i < (hisi_hba->n_phy < 9 ? hisi_hba->n_phy : 8); i++) 1113 if (phy_state & 1 << i) 1114 if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) 1115 bitmap |= 1 << i; 1116 1117 if (hisi_hba->n_phy == 9) { 1118 u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); 1119 1120 if (phy_state & 1 << 8) 1121 if (((port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> 1122 PORT_STATE_PHY8_PORT_NUM_OFF) == port_id) 1123 bitmap |= 1 << 9; 1124 } 1125 1126 return bitmap; 1127 } 1128 1129 /** 1130 * This function allocates across all queues to load balance. 1131 * Slots are allocated from queues in a round-robin fashion. 1132 * 1133 * The callpath to this function and upto writing the write 1134 * queue pointer should be safe from interruption. 1135 */ 1136 static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id, 1137 int *q, int *s) 1138 { 1139 struct device *dev = &hisi_hba->pdev->dev; 1140 struct hisi_sas_dq *dq; 1141 u32 r, w; 1142 int queue = dev_id % hisi_hba->queue_count; 1143 1144 dq = &hisi_hba->dq[queue]; 1145 w = dq->wr_point; 1146 r = hisi_sas_read32_relaxed(hisi_hba, 1147 DLVRY_Q_0_RD_PTR + (queue * 0x14)); 1148 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { 1149 dev_warn(dev, "full queue=%d r=%d w=%d\n\n", 1150 queue, r, w); 1151 return -EAGAIN; 1152 } 1153 1154 *q = queue; 1155 *s = w; 1156 return 0; 1157 } 1158 1159 static void start_delivery_v2_hw(struct hisi_hba *hisi_hba) 1160 { 1161 int dlvry_queue = hisi_hba->slot_prep->dlvry_queue; 1162 int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot; 1163 struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue]; 1164 1165 dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS; 1166 hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), 1167 dq->wr_point); 1168 } 1169 1170 static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba, 1171 struct hisi_sas_slot *slot, 1172 struct hisi_sas_cmd_hdr *hdr, 1173 struct scatterlist *scatter, 1174 int n_elem) 1175 { 1176 struct device *dev = &hisi_hba->pdev->dev; 1177 struct scatterlist *sg; 1178 int i; 1179 1180 if (n_elem > HISI_SAS_SGE_PAGE_CNT) { 1181 dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT", 1182 n_elem); 1183 return -EINVAL; 1184 } 1185 1186 slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC, 1187 &slot->sge_page_dma); 1188 if (!slot->sge_page) 1189 return -ENOMEM; 1190 1191 for_each_sg(scatter, sg, n_elem, i) { 1192 struct hisi_sas_sge *entry = &slot->sge_page->sge[i]; 1193 1194 entry->addr = cpu_to_le64(sg_dma_address(sg)); 1195 entry->page_ctrl_0 = entry->page_ctrl_1 = 0; 1196 entry->data_len = cpu_to_le32(sg_dma_len(sg)); 1197 entry->data_off = 0; 1198 } 1199 1200 hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma); 1201 1202 hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); 1203 1204 return 0; 1205 } 1206 1207 static int prep_smp_v2_hw(struct hisi_hba *hisi_hba, 1208 struct hisi_sas_slot *slot) 1209 { 1210 struct sas_task *task = slot->task; 1211 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; 1212 struct domain_device *device = task->dev; 1213 struct device *dev = &hisi_hba->pdev->dev; 1214 struct hisi_sas_port *port = slot->port; 1215 struct scatterlist *sg_req, *sg_resp; 1216 struct hisi_sas_device *sas_dev = device->lldd_dev; 1217 dma_addr_t req_dma_addr; 1218 unsigned int req_len, resp_len; 1219 int elem, rc; 1220 1221 /* 1222 * DMA-map SMP request, response buffers 1223 */ 1224 /* req */ 1225 sg_req = &task->smp_task.smp_req; 1226 elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE); 1227 if (!elem) 1228 return -ENOMEM; 1229 req_len = sg_dma_len(sg_req); 1230 req_dma_addr = sg_dma_address(sg_req); 1231 1232 /* resp */ 1233 sg_resp = &task->smp_task.smp_resp; 1234 elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE); 1235 if (!elem) { 1236 rc = -ENOMEM; 1237 goto err_out_req; 1238 } 1239 resp_len = sg_dma_len(sg_resp); 1240 if ((req_len & 0x3) || (resp_len & 0x3)) { 1241 rc = -EINVAL; 1242 goto err_out_resp; 1243 } 1244 1245 /* create header */ 1246 /* dw0 */ 1247 hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | 1248 (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */ 1249 (2 << CMD_HDR_CMD_OFF)); /* smp */ 1250 1251 /* map itct entry */ 1252 hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) | 1253 (1 << CMD_HDR_FRAME_TYPE_OFF) | 1254 (DIR_NO_DATA << CMD_HDR_DIR_OFF)); 1255 1256 /* dw2 */ 1257 hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) | 1258 (HISI_SAS_MAX_SMP_RESP_SZ / 4 << 1259 CMD_HDR_MRFL_OFF)); 1260 1261 hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); 1262 1263 hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); 1264 hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); 1265 1266 return 0; 1267 1268 err_out_resp: 1269 dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1, 1270 DMA_FROM_DEVICE); 1271 err_out_req: 1272 dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1, 1273 DMA_TO_DEVICE); 1274 return rc; 1275 } 1276 1277 static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba, 1278 struct hisi_sas_slot *slot, int is_tmf, 1279 struct hisi_sas_tmf_task *tmf) 1280 { 1281 struct sas_task *task = slot->task; 1282 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; 1283 struct domain_device *device = task->dev; 1284 struct hisi_sas_device *sas_dev = device->lldd_dev; 1285 struct hisi_sas_port *port = slot->port; 1286 struct sas_ssp_task *ssp_task = &task->ssp_task; 1287 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 1288 int has_data = 0, rc, priority = is_tmf; 1289 u8 *buf_cmd; 1290 u32 dw1 = 0, dw2 = 0; 1291 1292 hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | 1293 (2 << CMD_HDR_TLR_CTRL_OFF) | 1294 (port->id << CMD_HDR_PORT_OFF) | 1295 (priority << CMD_HDR_PRIORITY_OFF) | 1296 (1 << CMD_HDR_CMD_OFF)); /* ssp */ 1297 1298 dw1 = 1 << CMD_HDR_VDTL_OFF; 1299 if (is_tmf) { 1300 dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF; 1301 dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF; 1302 } else { 1303 dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF; 1304 switch (scsi_cmnd->sc_data_direction) { 1305 case DMA_TO_DEVICE: 1306 has_data = 1; 1307 dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; 1308 break; 1309 case DMA_FROM_DEVICE: 1310 has_data = 1; 1311 dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; 1312 break; 1313 default: 1314 dw1 &= ~CMD_HDR_DIR_MSK; 1315 } 1316 } 1317 1318 /* map itct entry */ 1319 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; 1320 hdr->dw1 = cpu_to_le32(dw1); 1321 1322 dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) 1323 + 3) / 4) << CMD_HDR_CFL_OFF) | 1324 ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) | 1325 (2 << CMD_HDR_SG_MOD_OFF); 1326 hdr->dw2 = cpu_to_le32(dw2); 1327 1328 hdr->transfer_tags = cpu_to_le32(slot->idx); 1329 1330 if (has_data) { 1331 rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, 1332 slot->n_elem); 1333 if (rc) 1334 return rc; 1335 } 1336 1337 hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); 1338 hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma); 1339 hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); 1340 1341 buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr); 1342 1343 memcpy(buf_cmd, &task->ssp_task.LUN, 8); 1344 if (!is_tmf) { 1345 buf_cmd[9] = task->ssp_task.task_attr | 1346 (task->ssp_task.task_prio << 3); 1347 memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, 1348 task->ssp_task.cmd->cmd_len); 1349 } else { 1350 buf_cmd[10] = tmf->tmf; 1351 switch (tmf->tmf) { 1352 case TMF_ABORT_TASK: 1353 case TMF_QUERY_TASK: 1354 buf_cmd[12] = 1355 (tmf->tag_of_task_to_be_managed >> 8) & 0xff; 1356 buf_cmd[13] = 1357 tmf->tag_of_task_to_be_managed & 0xff; 1358 break; 1359 default: 1360 break; 1361 } 1362 } 1363 1364 return 0; 1365 } 1366 1367 static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task, 1368 struct hisi_sas_slot *slot) 1369 { 1370 struct task_status_struct *ts = &task->task_status; 1371 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 1372 struct dev_to_host_fis *d2h = slot->status_buffer + 1373 sizeof(struct hisi_sas_err_record); 1374 1375 resp->frame_len = sizeof(struct dev_to_host_fis); 1376 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 1377 1378 ts->buf_valid_size = sizeof(*resp); 1379 } 1380 1381 /* by default, task resp is complete */ 1382 static void slot_err_v2_hw(struct hisi_hba *hisi_hba, 1383 struct sas_task *task, 1384 struct hisi_sas_slot *slot) 1385 { 1386 struct task_status_struct *ts = &task->task_status; 1387 struct hisi_sas_err_record_v2 *err_record = slot->status_buffer; 1388 u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type); 1389 u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type); 1390 u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type); 1391 u16 sipc_rx_err_type = cpu_to_le16(err_record->sipc_rx_err_type); 1392 u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type); 1393 int error = -1; 1394 1395 if (dma_rx_err_type) { 1396 error = ffs(dma_rx_err_type) 1397 - 1 + DMA_RX_ERR_BASE; 1398 } else if (sipc_rx_err_type) { 1399 error = ffs(sipc_rx_err_type) 1400 - 1 + SIPC_RX_ERR_BASE; 1401 } else if (dma_tx_err_type) { 1402 error = ffs(dma_tx_err_type) 1403 - 1 + DMA_TX_ERR_BASE; 1404 } else if (trans_rx_fail_type) { 1405 error = ffs(trans_rx_fail_type) 1406 - 1 + TRANS_RX_FAIL_BASE; 1407 } else if (trans_tx_fail_type) { 1408 error = ffs(trans_tx_fail_type) 1409 - 1 + TRANS_TX_FAIL_BASE; 1410 } 1411 1412 switch (task->task_proto) { 1413 case SAS_PROTOCOL_SSP: 1414 { 1415 switch (error) { 1416 case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION: 1417 { 1418 ts->stat = SAS_OPEN_REJECT; 1419 ts->open_rej_reason = SAS_OREJ_NO_DEST; 1420 break; 1421 } 1422 case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: 1423 { 1424 ts->stat = SAS_OPEN_REJECT; 1425 ts->open_rej_reason = SAS_OREJ_PATH_BLOCKED; 1426 break; 1427 } 1428 case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED: 1429 { 1430 ts->stat = SAS_OPEN_REJECT; 1431 ts->open_rej_reason = SAS_OREJ_EPROTO; 1432 break; 1433 } 1434 case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED: 1435 { 1436 ts->stat = SAS_OPEN_REJECT; 1437 ts->open_rej_reason = SAS_OREJ_CONN_RATE; 1438 break; 1439 } 1440 case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION: 1441 { 1442 ts->stat = SAS_OPEN_REJECT; 1443 ts->open_rej_reason = SAS_OREJ_BAD_DEST; 1444 break; 1445 } 1446 case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: 1447 { 1448 ts->stat = SAS_OPEN_REJECT; 1449 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 1450 break; 1451 } 1452 case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION: 1453 { 1454 ts->stat = SAS_OPEN_REJECT; 1455 ts->open_rej_reason = SAS_OREJ_WRONG_DEST; 1456 break; 1457 } 1458 case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: 1459 { 1460 ts->stat = SAS_OPEN_REJECT; 1461 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 1462 break; 1463 } 1464 case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER: 1465 { 1466 /* not sure */ 1467 ts->stat = SAS_DEV_NO_RESPONSE; 1468 break; 1469 } 1470 case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: 1471 { 1472 ts->stat = SAS_PHY_DOWN; 1473 break; 1474 } 1475 case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: 1476 { 1477 ts->stat = SAS_OPEN_TO; 1478 break; 1479 } 1480 case DMA_RX_DATA_LEN_OVERFLOW: 1481 { 1482 ts->stat = SAS_DATA_OVERRUN; 1483 ts->residual = 0; 1484 break; 1485 } 1486 case DMA_RX_DATA_LEN_UNDERFLOW: 1487 case SIPC_RX_DATA_UNDERFLOW_ERR: 1488 { 1489 ts->residual = trans_tx_fail_type; 1490 ts->stat = SAS_DATA_UNDERRUN; 1491 break; 1492 } 1493 case TRANS_TX_ERR_FRAME_TXED: 1494 { 1495 /* This will request a retry */ 1496 ts->stat = SAS_QUEUE_FULL; 1497 slot->abort = 1; 1498 break; 1499 } 1500 case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS: 1501 case TRANS_TX_ERR_PHY_NOT_ENABLE: 1502 case TRANS_TX_OPEN_CNX_ERR_BY_OTHER: 1503 case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT: 1504 case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED: 1505 case TRANS_TX_ERR_WITH_BREAK_TIMEOUT: 1506 case TRANS_TX_ERR_WITH_BREAK_REQUEST: 1507 case TRANS_TX_ERR_WITH_BREAK_RECEVIED: 1508 case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT: 1509 case TRANS_TX_ERR_WITH_CLOSE_NORMAL: 1510 case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT: 1511 case TRANS_TX_ERR_WITH_CLOSE_COMINIT: 1512 case TRANS_TX_ERR_WITH_NAK_RECEVIED: 1513 case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT: 1514 case TRANS_TX_ERR_WITH_IPTT_CONFLICT: 1515 case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT: 1516 case TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR: 1517 case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: 1518 case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM: 1519 case TRANS_RX_ERR_WITH_BREAK_TIMEOUT: 1520 case TRANS_RX_ERR_WITH_BREAK_REQUEST: 1521 case TRANS_RX_ERR_WITH_BREAK_RECEVIED: 1522 case TRANS_RX_ERR_WITH_CLOSE_NORMAL: 1523 case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT: 1524 case TRANS_RX_ERR_WITH_CLOSE_COMINIT: 1525 case TRANS_RX_ERR_WITH_DATA_LEN0: 1526 case TRANS_RX_ERR_WITH_BAD_HASH: 1527 case TRANS_RX_XRDY_WLEN_ZERO_ERR: 1528 case TRANS_RX_SSP_FRM_LEN_ERR: 1529 case TRANS_RX_ERR_WITH_BAD_FRM_TYPE: 1530 case DMA_TX_UNEXP_XFER_ERR: 1531 case DMA_TX_UNEXP_RETRANS_ERR: 1532 case DMA_TX_XFER_LEN_OVERFLOW: 1533 case DMA_TX_XFER_OFFSET_ERR: 1534 case DMA_RX_DATA_OFFSET_ERR: 1535 case DMA_RX_UNEXP_NORM_RESP_ERR: 1536 case DMA_RX_UNEXP_RDFRAME_ERR: 1537 case DMA_RX_UNKNOWN_FRM_ERR: 1538 { 1539 ts->stat = SAS_OPEN_REJECT; 1540 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 1541 break; 1542 } 1543 default: 1544 break; 1545 } 1546 } 1547 break; 1548 case SAS_PROTOCOL_SMP: 1549 ts->stat = SAM_STAT_CHECK_CONDITION; 1550 break; 1551 1552 case SAS_PROTOCOL_SATA: 1553 case SAS_PROTOCOL_STP: 1554 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 1555 { 1556 switch (error) { 1557 case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER: 1558 case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: 1559 case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION: 1560 { 1561 ts->resp = SAS_TASK_UNDELIVERED; 1562 ts->stat = SAS_DEV_NO_RESPONSE; 1563 break; 1564 } 1565 case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED: 1566 case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED: 1567 case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION: 1568 case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: 1569 case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION: 1570 case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: 1571 case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY: 1572 { 1573 ts->stat = SAS_OPEN_REJECT; 1574 break; 1575 } 1576 case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: 1577 { 1578 ts->stat = SAS_OPEN_TO; 1579 break; 1580 } 1581 case DMA_RX_DATA_LEN_OVERFLOW: 1582 { 1583 ts->stat = SAS_DATA_OVERRUN; 1584 break; 1585 } 1586 case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS: 1587 case TRANS_TX_ERR_PHY_NOT_ENABLE: 1588 case TRANS_TX_OPEN_CNX_ERR_BY_OTHER: 1589 case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT: 1590 case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED: 1591 case TRANS_TX_ERR_WITH_BREAK_TIMEOUT: 1592 case TRANS_TX_ERR_WITH_BREAK_REQUEST: 1593 case TRANS_TX_ERR_WITH_BREAK_RECEVIED: 1594 case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT: 1595 case TRANS_TX_ERR_WITH_CLOSE_NORMAL: 1596 case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT: 1597 case TRANS_TX_ERR_WITH_CLOSE_COMINIT: 1598 case TRANS_TX_ERR_WITH_NAK_RECEVIED: 1599 case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT: 1600 case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT: 1601 case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT: 1602 case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: 1603 case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM: 1604 case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR: 1605 case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR: 1606 case TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN: 1607 case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP: 1608 case TRANS_RX_ERR_WITH_CLOSE_NORMAL: 1609 case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: 1610 case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT: 1611 case TRANS_RX_ERR_WITH_CLOSE_COMINIT: 1612 case TRANS_RX_ERR_WITH_DATA_LEN0: 1613 case TRANS_RX_ERR_WITH_BAD_HASH: 1614 case TRANS_RX_XRDY_WLEN_ZERO_ERR: 1615 case TRANS_RX_SSP_FRM_LEN_ERR: 1616 case SIPC_RX_FIS_STATUS_ERR_BIT_VLD: 1617 case SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR: 1618 case SIPC_RX_FIS_STATUS_BSY_BIT_ERR: 1619 case SIPC_RX_WRSETUP_LEN_ODD_ERR: 1620 case SIPC_RX_WRSETUP_LEN_ZERO_ERR: 1621 case SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR: 1622 case SIPC_RX_SATA_UNEXP_FIS_ERR: 1623 case DMA_RX_SATA_FRAME_TYPE_ERR: 1624 case DMA_RX_UNEXP_RDFRAME_ERR: 1625 case DMA_RX_PIO_DATA_LEN_ERR: 1626 case DMA_RX_RDSETUP_STATUS_ERR: 1627 case DMA_RX_RDSETUP_STATUS_DRQ_ERR: 1628 case DMA_RX_RDSETUP_STATUS_BSY_ERR: 1629 case DMA_RX_RDSETUP_LEN_ODD_ERR: 1630 case DMA_RX_RDSETUP_LEN_ZERO_ERR: 1631 case DMA_RX_RDSETUP_LEN_OVER_ERR: 1632 case DMA_RX_RDSETUP_OFFSET_ERR: 1633 case DMA_RX_RDSETUP_ACTIVE_ERR: 1634 case DMA_RX_RDSETUP_ESTATUS_ERR: 1635 case DMA_RX_UNKNOWN_FRM_ERR: 1636 { 1637 ts->stat = SAS_OPEN_REJECT; 1638 break; 1639 } 1640 default: 1641 { 1642 ts->stat = SAS_PROTO_RESPONSE; 1643 break; 1644 } 1645 } 1646 sata_done_v2_hw(hisi_hba, task, slot); 1647 } 1648 break; 1649 default: 1650 break; 1651 } 1652 } 1653 1654 static int 1655 slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, 1656 int abort) 1657 { 1658 struct sas_task *task = slot->task; 1659 struct hisi_sas_device *sas_dev; 1660 struct device *dev = &hisi_hba->pdev->dev; 1661 struct task_status_struct *ts; 1662 struct domain_device *device; 1663 enum exec_status sts; 1664 struct hisi_sas_complete_v2_hdr *complete_queue = 1665 hisi_hba->complete_hdr[slot->cmplt_queue]; 1666 struct hisi_sas_complete_v2_hdr *complete_hdr = 1667 &complete_queue[slot->cmplt_queue_slot]; 1668 1669 if (unlikely(!task || !task->lldd_task || !task->dev)) 1670 return -EINVAL; 1671 1672 ts = &task->task_status; 1673 device = task->dev; 1674 sas_dev = device->lldd_dev; 1675 1676 task->task_state_flags &= 1677 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 1678 task->task_state_flags |= SAS_TASK_STATE_DONE; 1679 1680 memset(ts, 0, sizeof(*ts)); 1681 ts->resp = SAS_TASK_COMPLETE; 1682 1683 if (unlikely(!sas_dev || abort)) { 1684 if (!sas_dev) 1685 dev_dbg(dev, "slot complete: port has not device\n"); 1686 ts->stat = SAS_PHY_DOWN; 1687 goto out; 1688 } 1689 1690 /* Use SAS+TMF status codes */ 1691 switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK) 1692 >> CMPLT_HDR_ABORT_STAT_OFF) { 1693 case STAT_IO_ABORTED: 1694 /* this io has been aborted by abort command */ 1695 ts->stat = SAS_ABORTED_TASK; 1696 goto out; 1697 case STAT_IO_COMPLETE: 1698 /* internal abort command complete */ 1699 ts->stat = TMF_RESP_FUNC_COMPLETE; 1700 goto out; 1701 case STAT_IO_NO_DEVICE: 1702 ts->stat = TMF_RESP_FUNC_COMPLETE; 1703 goto out; 1704 case STAT_IO_NOT_VALID: 1705 /* abort single io, controller don't find 1706 * the io need to abort 1707 */ 1708 ts->stat = TMF_RESP_FUNC_FAILED; 1709 goto out; 1710 default: 1711 break; 1712 } 1713 1714 if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) && 1715 (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) { 1716 1717 slot_err_v2_hw(hisi_hba, task, slot); 1718 if (unlikely(slot->abort)) { 1719 queue_work(hisi_hba->wq, &slot->abort_slot); 1720 /* immediately return and do not complete */ 1721 return ts->stat; 1722 } 1723 goto out; 1724 } 1725 1726 switch (task->task_proto) { 1727 case SAS_PROTOCOL_SSP: 1728 { 1729 struct ssp_response_iu *iu = slot->status_buffer + 1730 sizeof(struct hisi_sas_err_record); 1731 1732 sas_ssp_task_response(dev, task, iu); 1733 break; 1734 } 1735 case SAS_PROTOCOL_SMP: 1736 { 1737 struct scatterlist *sg_resp = &task->smp_task.smp_resp; 1738 void *to; 1739 1740 ts->stat = SAM_STAT_GOOD; 1741 to = kmap_atomic(sg_page(sg_resp)); 1742 1743 dma_unmap_sg(dev, &task->smp_task.smp_resp, 1, 1744 DMA_FROM_DEVICE); 1745 dma_unmap_sg(dev, &task->smp_task.smp_req, 1, 1746 DMA_TO_DEVICE); 1747 memcpy(to + sg_resp->offset, 1748 slot->status_buffer + 1749 sizeof(struct hisi_sas_err_record), 1750 sg_dma_len(sg_resp)); 1751 kunmap_atomic(to); 1752 break; 1753 } 1754 case SAS_PROTOCOL_SATA: 1755 case SAS_PROTOCOL_STP: 1756 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 1757 { 1758 ts->stat = SAM_STAT_GOOD; 1759 sata_done_v2_hw(hisi_hba, task, slot); 1760 break; 1761 } 1762 default: 1763 ts->stat = SAM_STAT_CHECK_CONDITION; 1764 break; 1765 } 1766 1767 if (!slot->port->port_attached) { 1768 dev_err(dev, "slot complete: port %d has removed\n", 1769 slot->port->sas_port.id); 1770 ts->stat = SAS_PHY_DOWN; 1771 } 1772 1773 out: 1774 if (sas_dev) 1775 atomic64_dec(&sas_dev->running_req); 1776 1777 hisi_sas_slot_task_free(hisi_hba, task, slot); 1778 sts = ts->stat; 1779 1780 if (task->task_done) 1781 task->task_done(task); 1782 1783 return sts; 1784 } 1785 1786 static u8 get_ata_protocol(u8 cmd, int direction) 1787 { 1788 switch (cmd) { 1789 case ATA_CMD_FPDMA_WRITE: 1790 case ATA_CMD_FPDMA_READ: 1791 case ATA_CMD_FPDMA_RECV: 1792 case ATA_CMD_FPDMA_SEND: 1793 case ATA_CMD_NCQ_NON_DATA: 1794 return SATA_PROTOCOL_FPDMA; 1795 1796 case ATA_CMD_DOWNLOAD_MICRO: 1797 case ATA_CMD_ID_ATA: 1798 case ATA_CMD_PMP_READ: 1799 case ATA_CMD_READ_LOG_EXT: 1800 case ATA_CMD_PIO_READ: 1801 case ATA_CMD_PIO_READ_EXT: 1802 case ATA_CMD_PMP_WRITE: 1803 case ATA_CMD_WRITE_LOG_EXT: 1804 case ATA_CMD_PIO_WRITE: 1805 case ATA_CMD_PIO_WRITE_EXT: 1806 return SATA_PROTOCOL_PIO; 1807 1808 case ATA_CMD_DSM: 1809 case ATA_CMD_DOWNLOAD_MICRO_DMA: 1810 case ATA_CMD_PMP_READ_DMA: 1811 case ATA_CMD_PMP_WRITE_DMA: 1812 case ATA_CMD_READ: 1813 case ATA_CMD_READ_EXT: 1814 case ATA_CMD_READ_LOG_DMA_EXT: 1815 case ATA_CMD_READ_STREAM_DMA_EXT: 1816 case ATA_CMD_TRUSTED_RCV_DMA: 1817 case ATA_CMD_TRUSTED_SND_DMA: 1818 case ATA_CMD_WRITE: 1819 case ATA_CMD_WRITE_EXT: 1820 case ATA_CMD_WRITE_FUA_EXT: 1821 case ATA_CMD_WRITE_QUEUED: 1822 case ATA_CMD_WRITE_LOG_DMA_EXT: 1823 case ATA_CMD_WRITE_STREAM_DMA_EXT: 1824 return SATA_PROTOCOL_DMA; 1825 1826 case ATA_CMD_CHK_POWER: 1827 case ATA_CMD_DEV_RESET: 1828 case ATA_CMD_EDD: 1829 case ATA_CMD_FLUSH: 1830 case ATA_CMD_FLUSH_EXT: 1831 case ATA_CMD_VERIFY: 1832 case ATA_CMD_VERIFY_EXT: 1833 case ATA_CMD_SET_FEATURES: 1834 case ATA_CMD_STANDBY: 1835 case ATA_CMD_STANDBYNOW1: 1836 return SATA_PROTOCOL_NONDATA; 1837 default: 1838 if (direction == DMA_NONE) 1839 return SATA_PROTOCOL_NONDATA; 1840 return SATA_PROTOCOL_PIO; 1841 } 1842 } 1843 1844 static int get_ncq_tag_v2_hw(struct sas_task *task, u32 *tag) 1845 { 1846 struct ata_queued_cmd *qc = task->uldd_task; 1847 1848 if (qc) { 1849 if (qc->tf.command == ATA_CMD_FPDMA_WRITE || 1850 qc->tf.command == ATA_CMD_FPDMA_READ) { 1851 *tag = qc->tag; 1852 return 1; 1853 } 1854 } 1855 return 0; 1856 } 1857 1858 static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, 1859 struct hisi_sas_slot *slot) 1860 { 1861 struct sas_task *task = slot->task; 1862 struct domain_device *device = task->dev; 1863 struct domain_device *parent_dev = device->parent; 1864 struct hisi_sas_device *sas_dev = device->lldd_dev; 1865 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; 1866 struct hisi_sas_port *port = device->port->lldd_port; 1867 u8 *buf_cmd; 1868 int has_data = 0, rc = 0, hdr_tag = 0; 1869 u32 dw1 = 0, dw2 = 0; 1870 1871 /* create header */ 1872 /* dw0 */ 1873 hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); 1874 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) 1875 hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF); 1876 else 1877 hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF); 1878 1879 /* dw1 */ 1880 switch (task->data_dir) { 1881 case DMA_TO_DEVICE: 1882 has_data = 1; 1883 dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; 1884 break; 1885 case DMA_FROM_DEVICE: 1886 has_data = 1; 1887 dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; 1888 break; 1889 default: 1890 dw1 &= ~CMD_HDR_DIR_MSK; 1891 } 1892 1893 if (0 == task->ata_task.fis.command) 1894 dw1 |= 1 << CMD_HDR_RESET_OFF; 1895 1896 dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir)) 1897 << CMD_HDR_FRAME_TYPE_OFF; 1898 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; 1899 hdr->dw1 = cpu_to_le32(dw1); 1900 1901 /* dw2 */ 1902 if (task->ata_task.use_ncq && get_ncq_tag_v2_hw(task, &hdr_tag)) { 1903 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); 1904 dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; 1905 } 1906 1907 dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF | 1908 2 << CMD_HDR_SG_MOD_OFF; 1909 hdr->dw2 = cpu_to_le32(dw2); 1910 1911 /* dw3 */ 1912 hdr->transfer_tags = cpu_to_le32(slot->idx); 1913 1914 if (has_data) { 1915 rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, 1916 slot->n_elem); 1917 if (rc) 1918 return rc; 1919 } 1920 1921 1922 hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); 1923 hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma); 1924 hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); 1925 1926 buf_cmd = slot->command_table; 1927 1928 if (likely(!task->ata_task.device_control_reg_update)) 1929 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ 1930 /* fill in command FIS */ 1931 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); 1932 1933 return 0; 1934 } 1935 1936 static int prep_abort_v2_hw(struct hisi_hba *hisi_hba, 1937 struct hisi_sas_slot *slot, 1938 int device_id, int abort_flag, int tag_to_abort) 1939 { 1940 struct sas_task *task = slot->task; 1941 struct domain_device *dev = task->dev; 1942 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; 1943 struct hisi_sas_port *port = slot->port; 1944 1945 /* dw0 */ 1946 hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/ 1947 (port->id << CMD_HDR_PORT_OFF) | 1948 ((dev_is_sata(dev) ? 1:0) << 1949 CMD_HDR_ABORT_DEVICE_TYPE_OFF) | 1950 (abort_flag << CMD_HDR_ABORT_FLAG_OFF)); 1951 1952 /* dw1 */ 1953 hdr->dw1 = cpu_to_le32(device_id << CMD_HDR_DEV_ID_OFF); 1954 1955 /* dw7 */ 1956 hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF); 1957 hdr->transfer_tags = cpu_to_le32(slot->idx); 1958 1959 return 0; 1960 } 1961 1962 static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) 1963 { 1964 int i, res = 0; 1965 u32 context, port_id, link_rate, hard_phy_linkrate; 1966 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1967 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1968 struct device *dev = &hisi_hba->pdev->dev; 1969 u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; 1970 struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; 1971 1972 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); 1973 1974 /* Check for SATA dev */ 1975 context = hisi_sas_read32(hisi_hba, PHY_CONTEXT); 1976 if (context & (1 << phy_no)) 1977 goto end; 1978 1979 if (phy_no == 8) { 1980 u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); 1981 1982 port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> 1983 PORT_STATE_PHY8_PORT_NUM_OFF; 1984 link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >> 1985 PORT_STATE_PHY8_CONN_RATE_OFF; 1986 } else { 1987 port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); 1988 port_id = (port_id >> (4 * phy_no)) & 0xf; 1989 link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); 1990 link_rate = (link_rate >> (phy_no * 4)) & 0xf; 1991 } 1992 1993 if (port_id == 0xf) { 1994 dev_err(dev, "phyup: phy%d invalid portid\n", phy_no); 1995 res = IRQ_NONE; 1996 goto end; 1997 } 1998 1999 for (i = 0; i < 6; i++) { 2000 u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no, 2001 RX_IDAF_DWORD0 + (i * 4)); 2002 frame_rcvd[i] = __swab32(idaf); 2003 } 2004 2005 sas_phy->linkrate = link_rate; 2006 hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no, 2007 HARD_PHY_LINKRATE); 2008 phy->maximum_linkrate = hard_phy_linkrate & 0xf; 2009 phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf; 2010 2011 sas_phy->oob_mode = SAS_OOB_MODE; 2012 memcpy(sas_phy->attached_sas_addr, &id->sas_addr, SAS_ADDR_SIZE); 2013 dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); 2014 phy->port_id = port_id; 2015 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); 2016 phy->phy_type |= PORT_TYPE_SAS; 2017 phy->phy_attached = 1; 2018 phy->identify.device_type = id->dev_type; 2019 phy->frame_rcvd_size = sizeof(struct sas_identify_frame); 2020 if (phy->identify.device_type == SAS_END_DEVICE) 2021 phy->identify.target_port_protocols = 2022 SAS_PROTOCOL_SSP; 2023 else if (phy->identify.device_type != SAS_PHY_UNUSED) 2024 phy->identify.target_port_protocols = 2025 SAS_PROTOCOL_SMP; 2026 queue_work(hisi_hba->wq, &phy->phyup_ws); 2027 2028 end: 2029 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, 2030 CHL_INT0_SL_PHY_ENABLE_MSK); 2031 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0); 2032 2033 return res; 2034 } 2035 2036 static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) 2037 { 2038 int res = 0; 2039 u32 phy_state, sl_ctrl, txid_auto; 2040 2041 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); 2042 2043 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); 2044 hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0); 2045 2046 sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); 2047 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, 2048 sl_ctrl & ~SL_CONTROL_CTA_MSK); 2049 2050 txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); 2051 hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, 2052 txid_auto | TXID_AUTO_CT3_MSK); 2053 2054 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK); 2055 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0); 2056 2057 return res; 2058 } 2059 2060 static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p) 2061 { 2062 struct hisi_hba *hisi_hba = p; 2063 u32 irq_msk; 2064 int phy_no = 0; 2065 irqreturn_t res = IRQ_HANDLED; 2066 2067 irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) 2068 >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff; 2069 while (irq_msk) { 2070 if (irq_msk & 1) { 2071 u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, 2072 CHL_INT0); 2073 2074 if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK) 2075 /* phy up */ 2076 if (phy_up_v2_hw(phy_no, hisi_hba)) { 2077 res = IRQ_NONE; 2078 goto end; 2079 } 2080 2081 if (irq_value & CHL_INT0_NOT_RDY_MSK) 2082 /* phy down */ 2083 if (phy_down_v2_hw(phy_no, hisi_hba)) { 2084 res = IRQ_NONE; 2085 goto end; 2086 } 2087 } 2088 irq_msk >>= 1; 2089 phy_no++; 2090 } 2091 2092 end: 2093 return res; 2094 } 2095 2096 static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba) 2097 { 2098 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 2099 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2100 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 2101 u32 bcast_status; 2102 2103 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); 2104 bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS); 2105 if (bcast_status & RX_BCAST_CHG_MSK) 2106 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); 2107 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, 2108 CHL_INT0_SL_RX_BCST_ACK_MSK); 2109 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); 2110 } 2111 2112 static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) 2113 { 2114 struct hisi_hba *hisi_hba = p; 2115 struct device *dev = &hisi_hba->pdev->dev; 2116 u32 ent_msk, ent_tmp, irq_msk; 2117 int phy_no = 0; 2118 2119 ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); 2120 ent_tmp = ent_msk; 2121 ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK; 2122 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk); 2123 2124 irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >> 2125 HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff; 2126 2127 while (irq_msk) { 2128 if (irq_msk & (1 << phy_no)) { 2129 u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, 2130 CHL_INT0); 2131 u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no, 2132 CHL_INT1); 2133 u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no, 2134 CHL_INT2); 2135 2136 if (irq_value1) { 2137 if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK | 2138 CHL_INT1_DMAC_TX_ECC_ERR_MSK)) 2139 panic("%s: DMAC RX/TX ecc bad error!\ 2140 (0x%x)", 2141 dev_name(dev), irq_value1); 2142 2143 hisi_sas_phy_write32(hisi_hba, phy_no, 2144 CHL_INT1, irq_value1); 2145 } 2146 2147 if (irq_value2) 2148 hisi_sas_phy_write32(hisi_hba, phy_no, 2149 CHL_INT2, irq_value2); 2150 2151 2152 if (irq_value0) { 2153 if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK) 2154 phy_bcast_v2_hw(phy_no, hisi_hba); 2155 2156 hisi_sas_phy_write32(hisi_hba, phy_no, 2157 CHL_INT0, irq_value0 2158 & (~CHL_INT0_HOTPLUG_TOUT_MSK) 2159 & (~CHL_INT0_SL_PHY_ENABLE_MSK) 2160 & (~CHL_INT0_NOT_RDY_MSK)); 2161 } 2162 } 2163 irq_msk &= ~(1 << phy_no); 2164 phy_no++; 2165 } 2166 2167 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp); 2168 2169 return IRQ_HANDLED; 2170 } 2171 2172 static void 2173 one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value) 2174 { 2175 struct device *dev = &hisi_hba->pdev->dev; 2176 u32 reg_val; 2177 2178 if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) { 2179 reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR); 2180 dev_warn(dev, "hgc_dqe_acc1b_intr found: \ 2181 Ram address is 0x%08X\n", 2182 (reg_val & HGC_DQE_ECC_1B_ADDR_MSK) >> 2183 HGC_DQE_ECC_1B_ADDR_OFF); 2184 } 2185 2186 if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF)) { 2187 reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR); 2188 dev_warn(dev, "hgc_iost_acc1b_intr found: \ 2189 Ram address is 0x%08X\n", 2190 (reg_val & HGC_IOST_ECC_1B_ADDR_MSK) >> 2191 HGC_IOST_ECC_1B_ADDR_OFF); 2192 } 2193 2194 if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF)) { 2195 reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR); 2196 dev_warn(dev, "hgc_itct_acc1b_intr found: \ 2197 Ram address is 0x%08X\n", 2198 (reg_val & HGC_ITCT_ECC_1B_ADDR_MSK) >> 2199 HGC_ITCT_ECC_1B_ADDR_OFF); 2200 } 2201 2202 if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF)) { 2203 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); 2204 dev_warn(dev, "hgc_iostl_acc1b_intr found: \ 2205 memory address is 0x%08X\n", 2206 (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >> 2207 HGC_LM_DFX_STATUS2_IOSTLIST_OFF); 2208 } 2209 2210 if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF)) { 2211 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); 2212 dev_warn(dev, "hgc_itctl_acc1b_intr found: \ 2213 memory address is 0x%08X\n", 2214 (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >> 2215 HGC_LM_DFX_STATUS2_ITCTLIST_OFF); 2216 } 2217 2218 if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF)) { 2219 reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR); 2220 dev_warn(dev, "hgc_cqe_acc1b_intr found: \ 2221 Ram address is 0x%08X\n", 2222 (reg_val & HGC_CQE_ECC_1B_ADDR_MSK) >> 2223 HGC_CQE_ECC_1B_ADDR_OFF); 2224 } 2225 2226 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF)) { 2227 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); 2228 dev_warn(dev, "rxm_mem0_acc1b_intr found: \ 2229 memory address is 0x%08X\n", 2230 (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >> 2231 HGC_RXM_DFX_STATUS14_MEM0_OFF); 2232 } 2233 2234 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF)) { 2235 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); 2236 dev_warn(dev, "rxm_mem1_acc1b_intr found: \ 2237 memory address is 0x%08X\n", 2238 (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >> 2239 HGC_RXM_DFX_STATUS14_MEM1_OFF); 2240 } 2241 2242 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF)) { 2243 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); 2244 dev_warn(dev, "rxm_mem2_acc1b_intr found: \ 2245 memory address is 0x%08X\n", 2246 (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >> 2247 HGC_RXM_DFX_STATUS14_MEM2_OFF); 2248 } 2249 2250 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF)) { 2251 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15); 2252 dev_warn(dev, "rxm_mem3_acc1b_intr found: \ 2253 memory address is 0x%08X\n", 2254 (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >> 2255 HGC_RXM_DFX_STATUS15_MEM3_OFF); 2256 } 2257 2258 } 2259 2260 static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, 2261 u32 irq_value) 2262 { 2263 u32 reg_val; 2264 struct device *dev = &hisi_hba->pdev->dev; 2265 2266 if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) { 2267 reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR); 2268 panic("%s: hgc_dqe_accbad_intr (0x%x) found: \ 2269 Ram address is 0x%08X\n", 2270 dev_name(dev), irq_value, 2271 (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >> 2272 HGC_DQE_ECC_MB_ADDR_OFF); 2273 } 2274 2275 if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) { 2276 reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR); 2277 panic("%s: hgc_iost_accbad_intr (0x%x) found: \ 2278 Ram address is 0x%08X\n", 2279 dev_name(dev), irq_value, 2280 (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >> 2281 HGC_IOST_ECC_MB_ADDR_OFF); 2282 } 2283 2284 if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) { 2285 reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR); 2286 panic("%s: hgc_itct_accbad_intr (0x%x) found: \ 2287 Ram address is 0x%08X\n", 2288 dev_name(dev), irq_value, 2289 (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >> 2290 HGC_ITCT_ECC_MB_ADDR_OFF); 2291 } 2292 2293 if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) { 2294 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); 2295 panic("%s: hgc_iostl_accbad_intr (0x%x) found: \ 2296 memory address is 0x%08X\n", 2297 dev_name(dev), irq_value, 2298 (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >> 2299 HGC_LM_DFX_STATUS2_IOSTLIST_OFF); 2300 } 2301 2302 if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) { 2303 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); 2304 panic("%s: hgc_itctl_accbad_intr (0x%x) found: \ 2305 memory address is 0x%08X\n", 2306 dev_name(dev), irq_value, 2307 (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >> 2308 HGC_LM_DFX_STATUS2_ITCTLIST_OFF); 2309 } 2310 2311 if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) { 2312 reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR); 2313 panic("%s: hgc_cqe_accbad_intr (0x%x) found: \ 2314 Ram address is 0x%08X\n", 2315 dev_name(dev), irq_value, 2316 (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >> 2317 HGC_CQE_ECC_MB_ADDR_OFF); 2318 } 2319 2320 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) { 2321 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); 2322 panic("%s: rxm_mem0_accbad_intr (0x%x) found: \ 2323 memory address is 0x%08X\n", 2324 dev_name(dev), irq_value, 2325 (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >> 2326 HGC_RXM_DFX_STATUS14_MEM0_OFF); 2327 } 2328 2329 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) { 2330 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); 2331 panic("%s: rxm_mem1_accbad_intr (0x%x) found: \ 2332 memory address is 0x%08X\n", 2333 dev_name(dev), irq_value, 2334 (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >> 2335 HGC_RXM_DFX_STATUS14_MEM1_OFF); 2336 } 2337 2338 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) { 2339 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); 2340 panic("%s: rxm_mem2_accbad_intr (0x%x) found: \ 2341 memory address is 0x%08X\n", 2342 dev_name(dev), irq_value, 2343 (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >> 2344 HGC_RXM_DFX_STATUS14_MEM2_OFF); 2345 } 2346 2347 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) { 2348 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15); 2349 panic("%s: rxm_mem3_accbad_intr (0x%x) found: \ 2350 memory address is 0x%08X\n", 2351 dev_name(dev), irq_value, 2352 (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >> 2353 HGC_RXM_DFX_STATUS15_MEM3_OFF); 2354 } 2355 2356 } 2357 2358 static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p) 2359 { 2360 struct hisi_hba *hisi_hba = p; 2361 u32 irq_value, irq_msk; 2362 2363 irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK); 2364 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff); 2365 2366 irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); 2367 if (irq_value) { 2368 one_bit_ecc_error_process_v2_hw(hisi_hba, irq_value); 2369 multi_bit_ecc_error_process_v2_hw(hisi_hba, irq_value); 2370 } 2371 2372 hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value); 2373 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk); 2374 2375 return IRQ_HANDLED; 2376 } 2377 2378 #define AXI_ERR_NR 8 2379 static const char axi_err_info[AXI_ERR_NR][32] = { 2380 "IOST_AXI_W_ERR", 2381 "IOST_AXI_R_ERR", 2382 "ITCT_AXI_W_ERR", 2383 "ITCT_AXI_R_ERR", 2384 "SATA_AXI_W_ERR", 2385 "SATA_AXI_R_ERR", 2386 "DQE_AXI_R_ERR", 2387 "CQE_AXI_W_ERR" 2388 }; 2389 2390 #define FIFO_ERR_NR 5 2391 static const char fifo_err_info[FIFO_ERR_NR][32] = { 2392 "CQE_WINFO_FIFO", 2393 "CQE_MSG_FIFIO", 2394 "GETDQE_FIFO", 2395 "CMDP_FIFO", 2396 "AWTCTRL_FIFO" 2397 }; 2398 2399 static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) 2400 { 2401 struct hisi_hba *hisi_hba = p; 2402 u32 irq_value, irq_msk, err_value; 2403 struct device *dev = &hisi_hba->pdev->dev; 2404 2405 irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); 2406 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe); 2407 2408 irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 2409 if (irq_value) { 2410 if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) { 2411 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 2412 1 << ENT_INT_SRC3_WP_DEPTH_OFF); 2413 panic("%s: write pointer and depth error (0x%x) \ 2414 found!\n", 2415 dev_name(dev), irq_value); 2416 } 2417 2418 if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) { 2419 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 2420 1 << 2421 ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF); 2422 panic("%s: iptt no match slot error (0x%x) found!\n", 2423 dev_name(dev), irq_value); 2424 } 2425 2426 if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF)) 2427 panic("%s: read pointer and depth error (0x%x) \ 2428 found!\n", 2429 dev_name(dev), irq_value); 2430 2431 if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) { 2432 int i; 2433 2434 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 2435 1 << ENT_INT_SRC3_AXI_OFF); 2436 err_value = hisi_sas_read32(hisi_hba, 2437 HGC_AXI_FIFO_ERR_INFO); 2438 2439 for (i = 0; i < AXI_ERR_NR; i++) { 2440 if (err_value & BIT(i)) 2441 panic("%s: %s (0x%x) found!\n", 2442 dev_name(dev), 2443 axi_err_info[i], irq_value); 2444 } 2445 } 2446 2447 if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) { 2448 int i; 2449 2450 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 2451 1 << ENT_INT_SRC3_FIFO_OFF); 2452 err_value = hisi_sas_read32(hisi_hba, 2453 HGC_AXI_FIFO_ERR_INFO); 2454 2455 for (i = 0; i < FIFO_ERR_NR; i++) { 2456 if (err_value & BIT(AXI_ERR_NR + i)) 2457 panic("%s: %s (0x%x) found!\n", 2458 dev_name(dev), 2459 fifo_err_info[i], irq_value); 2460 } 2461 2462 } 2463 2464 if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) { 2465 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 2466 1 << ENT_INT_SRC3_LM_OFF); 2467 panic("%s: LM add/fetch list error (0x%x) found!\n", 2468 dev_name(dev), irq_value); 2469 } 2470 2471 if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) { 2472 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 2473 1 << ENT_INT_SRC3_ABT_OFF); 2474 panic("%s: SAS_HGC_ABT fetch LM list error (0x%x) found!\n", 2475 dev_name(dev), irq_value); 2476 } 2477 } 2478 2479 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk); 2480 2481 return IRQ_HANDLED; 2482 } 2483 2484 static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) 2485 { 2486 struct hisi_sas_cq *cq = p; 2487 struct hisi_hba *hisi_hba = cq->hisi_hba; 2488 struct hisi_sas_slot *slot; 2489 struct hisi_sas_itct *itct; 2490 struct hisi_sas_complete_v2_hdr *complete_queue; 2491 u32 irq_value, rd_point = cq->rd_point, wr_point, dev_id; 2492 int queue = cq->id; 2493 2494 complete_queue = hisi_hba->complete_hdr[queue]; 2495 irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC); 2496 2497 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); 2498 2499 wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + 2500 (0x14 * queue)); 2501 2502 while (rd_point != wr_point) { 2503 struct hisi_sas_complete_v2_hdr *complete_hdr; 2504 int iptt; 2505 2506 complete_hdr = &complete_queue[rd_point]; 2507 2508 /* Check for NCQ completion */ 2509 if (complete_hdr->act) { 2510 u32 act_tmp = complete_hdr->act; 2511 int ncq_tag_count = ffs(act_tmp); 2512 2513 dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >> 2514 CMPLT_HDR_DEV_ID_OFF; 2515 itct = &hisi_hba->itct[dev_id]; 2516 2517 /* The NCQ tags are held in the itct header */ 2518 while (ncq_tag_count) { 2519 __le64 *ncq_tag = &itct->qw4_15[0]; 2520 2521 ncq_tag_count -= 1; 2522 iptt = (ncq_tag[ncq_tag_count / 5] 2523 >> (ncq_tag_count % 5) * 12) & 0xfff; 2524 2525 slot = &hisi_hba->slot_info[iptt]; 2526 slot->cmplt_queue_slot = rd_point; 2527 slot->cmplt_queue = queue; 2528 slot_complete_v2_hw(hisi_hba, slot, 0); 2529 2530 act_tmp &= ~(1 << ncq_tag_count); 2531 ncq_tag_count = ffs(act_tmp); 2532 } 2533 } else { 2534 iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK; 2535 slot = &hisi_hba->slot_info[iptt]; 2536 slot->cmplt_queue_slot = rd_point; 2537 slot->cmplt_queue = queue; 2538 slot_complete_v2_hw(hisi_hba, slot, 0); 2539 } 2540 2541 if (++rd_point >= HISI_SAS_QUEUE_SLOTS) 2542 rd_point = 0; 2543 } 2544 2545 /* update rd_point */ 2546 cq->rd_point = rd_point; 2547 hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); 2548 return IRQ_HANDLED; 2549 } 2550 2551 static irqreturn_t sata_int_v2_hw(int irq_no, void *p) 2552 { 2553 struct hisi_sas_phy *phy = p; 2554 struct hisi_hba *hisi_hba = phy->hisi_hba; 2555 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2556 struct device *dev = &hisi_hba->pdev->dev; 2557 struct hisi_sas_initial_fis *initial_fis; 2558 struct dev_to_host_fis *fis; 2559 u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate; 2560 irqreturn_t res = IRQ_HANDLED; 2561 u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; 2562 int phy_no, offset; 2563 2564 phy_no = sas_phy->id; 2565 initial_fis = &hisi_hba->initial_fis[phy_no]; 2566 fis = &initial_fis->fis; 2567 2568 offset = 4 * (phy_no / 4); 2569 ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1 + offset); 2570 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, 2571 ent_msk | 1 << ((phy_no % 4) * 8)); 2572 2573 ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1 + offset); 2574 ent_tmp = ent_int & (1 << (ENT_INT_SRC1_D2H_FIS_CH1_OFF * 2575 (phy_no % 4))); 2576 ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4); 2577 if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) { 2578 dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no); 2579 res = IRQ_NONE; 2580 goto end; 2581 } 2582 2583 /* check ERR bit of Status Register */ 2584 if (fis->status & ATA_ERR) { 2585 dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no, 2586 fis->status); 2587 disable_phy_v2_hw(hisi_hba, phy_no); 2588 enable_phy_v2_hw(hisi_hba, phy_no); 2589 res = IRQ_NONE; 2590 goto end; 2591 } 2592 2593 if (unlikely(phy_no == 8)) { 2594 u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); 2595 2596 port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> 2597 PORT_STATE_PHY8_PORT_NUM_OFF; 2598 link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >> 2599 PORT_STATE_PHY8_CONN_RATE_OFF; 2600 } else { 2601 port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); 2602 port_id = (port_id >> (4 * phy_no)) & 0xf; 2603 link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); 2604 link_rate = (link_rate >> (phy_no * 4)) & 0xf; 2605 } 2606 2607 if (port_id == 0xf) { 2608 dev_err(dev, "sata int: phy%d invalid portid\n", phy_no); 2609 res = IRQ_NONE; 2610 goto end; 2611 } 2612 2613 sas_phy->linkrate = link_rate; 2614 hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no, 2615 HARD_PHY_LINKRATE); 2616 phy->maximum_linkrate = hard_phy_linkrate & 0xf; 2617 phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf; 2618 2619 sas_phy->oob_mode = SATA_OOB_MODE; 2620 /* Make up some unique SAS address */ 2621 attached_sas_addr[0] = 0x50; 2622 attached_sas_addr[7] = phy_no; 2623 memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE); 2624 memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis)); 2625 dev_info(dev, "sata int phyup: phy%d link_rate=%d\n", phy_no, link_rate); 2626 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); 2627 phy->port_id = port_id; 2628 phy->phy_type |= PORT_TYPE_SATA; 2629 phy->phy_attached = 1; 2630 phy->identify.device_type = SAS_SATA_DEV; 2631 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); 2632 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; 2633 queue_work(hisi_hba->wq, &phy->phyup_ws); 2634 2635 end: 2636 hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp); 2637 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk); 2638 2639 return res; 2640 } 2641 2642 static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = { 2643 int_phy_updown_v2_hw, 2644 int_chnl_int_v2_hw, 2645 }; 2646 2647 static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = { 2648 fatal_ecc_int_v2_hw, 2649 fatal_axi_int_v2_hw 2650 }; 2651 2652 /** 2653 * There is a limitation in the hip06 chipset that we need 2654 * to map in all mbigen interrupts, even if they are not used. 2655 */ 2656 static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) 2657 { 2658 struct platform_device *pdev = hisi_hba->pdev; 2659 struct device *dev = &pdev->dev; 2660 int i, irq, rc, irq_map[128]; 2661 2662 2663 for (i = 0; i < 128; i++) 2664 irq_map[i] = platform_get_irq(pdev, i); 2665 2666 for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) { 2667 int idx = i; 2668 2669 irq = irq_map[idx + 1]; /* Phy up/down is irq1 */ 2670 if (!irq) { 2671 dev_err(dev, "irq init: fail map phy interrupt %d\n", 2672 idx); 2673 return -ENOENT; 2674 } 2675 2676 rc = devm_request_irq(dev, irq, phy_interrupts[i], 0, 2677 DRV_NAME " phy", hisi_hba); 2678 if (rc) { 2679 dev_err(dev, "irq init: could not request " 2680 "phy interrupt %d, rc=%d\n", 2681 irq, rc); 2682 return -ENOENT; 2683 } 2684 } 2685 2686 for (i = 0; i < hisi_hba->n_phy; i++) { 2687 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 2688 int idx = i + 72; /* First SATA interrupt is irq72 */ 2689 2690 irq = irq_map[idx]; 2691 if (!irq) { 2692 dev_err(dev, "irq init: fail map phy interrupt %d\n", 2693 idx); 2694 return -ENOENT; 2695 } 2696 2697 rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0, 2698 DRV_NAME " sata", phy); 2699 if (rc) { 2700 dev_err(dev, "irq init: could not request " 2701 "sata interrupt %d, rc=%d\n", 2702 irq, rc); 2703 return -ENOENT; 2704 } 2705 } 2706 2707 for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++) { 2708 int idx = i; 2709 2710 irq = irq_map[idx + 81]; 2711 if (!irq) { 2712 dev_err(dev, "irq init: fail map fatal interrupt %d\n", 2713 idx); 2714 return -ENOENT; 2715 } 2716 2717 rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0, 2718 DRV_NAME " fatal", hisi_hba); 2719 if (rc) { 2720 dev_err(dev, 2721 "irq init: could not request fatal interrupt %d, rc=%d\n", 2722 irq, rc); 2723 return -ENOENT; 2724 } 2725 } 2726 2727 for (i = 0; i < hisi_hba->queue_count; i++) { 2728 int idx = i + 96; /* First cq interrupt is irq96 */ 2729 2730 irq = irq_map[idx]; 2731 if (!irq) { 2732 dev_err(dev, 2733 "irq init: could not map cq interrupt %d\n", 2734 idx); 2735 return -ENOENT; 2736 } 2737 rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0, 2738 DRV_NAME " cq", &hisi_hba->cq[i]); 2739 if (rc) { 2740 dev_err(dev, 2741 "irq init: could not request cq interrupt %d, rc=%d\n", 2742 irq, rc); 2743 return -ENOENT; 2744 } 2745 } 2746 2747 return 0; 2748 } 2749 2750 static int hisi_sas_v2_init(struct hisi_hba *hisi_hba) 2751 { 2752 int rc; 2753 2754 rc = hw_init_v2_hw(hisi_hba); 2755 if (rc) 2756 return rc; 2757 2758 rc = interrupt_init_v2_hw(hisi_hba); 2759 if (rc) 2760 return rc; 2761 2762 phys_init_v2_hw(hisi_hba); 2763 2764 return 0; 2765 } 2766 2767 static const struct hisi_sas_hw hisi_sas_v2_hw = { 2768 .hw_init = hisi_sas_v2_init, 2769 .setup_itct = setup_itct_v2_hw, 2770 .slot_index_alloc = slot_index_alloc_quirk_v2_hw, 2771 .alloc_dev = alloc_dev_quirk_v2_hw, 2772 .sl_notify = sl_notify_v2_hw, 2773 .get_wideport_bitmap = get_wideport_bitmap_v2_hw, 2774 .free_device = free_device_v2_hw, 2775 .prep_smp = prep_smp_v2_hw, 2776 .prep_ssp = prep_ssp_v2_hw, 2777 .prep_stp = prep_ata_v2_hw, 2778 .prep_abort = prep_abort_v2_hw, 2779 .get_free_slot = get_free_slot_v2_hw, 2780 .start_delivery = start_delivery_v2_hw, 2781 .slot_complete = slot_complete_v2_hw, 2782 .phy_enable = enable_phy_v2_hw, 2783 .phy_disable = disable_phy_v2_hw, 2784 .phy_hard_reset = phy_hard_reset_v2_hw, 2785 .phy_set_linkrate = phy_set_linkrate_v2_hw, 2786 .phy_get_max_linkrate = phy_get_max_linkrate_v2_hw, 2787 .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW, 2788 .complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr), 2789 }; 2790 2791 static int hisi_sas_v2_probe(struct platform_device *pdev) 2792 { 2793 /* 2794 * Check if we should defer the probe before we probe the 2795 * upper layer, as it's hard to defer later on. 2796 */ 2797 int ret = platform_get_irq(pdev, 0); 2798 2799 if (ret < 0) { 2800 if (ret != -EPROBE_DEFER) 2801 dev_err(&pdev->dev, "cannot obtain irq\n"); 2802 return ret; 2803 } 2804 2805 return hisi_sas_probe(pdev, &hisi_sas_v2_hw); 2806 } 2807 2808 static int hisi_sas_v2_remove(struct platform_device *pdev) 2809 { 2810 return hisi_sas_remove(pdev); 2811 } 2812 2813 static const struct of_device_id sas_v2_of_match[] = { 2814 { .compatible = "hisilicon,hip06-sas-v2",}, 2815 { .compatible = "hisilicon,hip07-sas-v2",}, 2816 {}, 2817 }; 2818 MODULE_DEVICE_TABLE(of, sas_v2_of_match); 2819 2820 static const struct acpi_device_id sas_v2_acpi_match[] = { 2821 { "HISI0162", 0 }, 2822 { } 2823 }; 2824 2825 MODULE_DEVICE_TABLE(acpi, sas_v2_acpi_match); 2826 2827 static struct platform_driver hisi_sas_v2_driver = { 2828 .probe = hisi_sas_v2_probe, 2829 .remove = hisi_sas_v2_remove, 2830 .driver = { 2831 .name = DRV_NAME, 2832 .of_match_table = sas_v2_of_match, 2833 .acpi_match_table = ACPI_PTR(sas_v2_acpi_match), 2834 }, 2835 }; 2836 2837 module_platform_driver(hisi_sas_v2_driver); 2838 2839 MODULE_LICENSE("GPL"); 2840 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 2841 MODULE_DESCRIPTION("HISILICON SAS controller v2 hw driver"); 2842 MODULE_ALIAS("platform:" DRV_NAME); 2843