1f0c568a4SJianyun Li /* 2f0c568a4SJianyun Li * Marvell UMI driver 3f0c568a4SJianyun Li * 4f0c568a4SJianyun Li * Copyright 2011 Marvell. <jyli@marvell.com> 5f0c568a4SJianyun Li * 6f0c568a4SJianyun Li * This file is licensed under GPLv2. 7f0c568a4SJianyun Li * 8f0c568a4SJianyun Li * This program is free software; you can redistribute it and/or 9f0c568a4SJianyun Li * modify it under the terms of the GNU General Public License as 10f0c568a4SJianyun Li * published by the Free Software Foundation; version 2 of the 11f0c568a4SJianyun Li * License. 12f0c568a4SJianyun Li * 13f0c568a4SJianyun Li * This program is distributed in the hope that it will be useful, 14f0c568a4SJianyun Li * but WITHOUT ANY WARRANTY; without even the implied warranty of 15f0c568a4SJianyun Li * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16f0c568a4SJianyun Li * General Public License for more details. 17f0c568a4SJianyun Li * 18f0c568a4SJianyun Li * You should have received a copy of the GNU General Public License 19f0c568a4SJianyun Li * along with this program; if not, write to the Free Software 20f0c568a4SJianyun Li * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 21f0c568a4SJianyun Li * USA 22f0c568a4SJianyun Li */ 23f0c568a4SJianyun Li 24f0c568a4SJianyun Li #include <linux/kernel.h> 25f0c568a4SJianyun Li #include <linux/module.h> 26f0c568a4SJianyun Li #include <linux/moduleparam.h> 27f0c568a4SJianyun Li #include <linux/init.h> 28f0c568a4SJianyun Li #include <linux/device.h> 29f0c568a4SJianyun Li #include <linux/pci.h> 30f0c568a4SJianyun Li #include <linux/list.h> 31f0c568a4SJianyun Li #include <linux/spinlock.h> 32f0c568a4SJianyun Li #include <linux/interrupt.h> 33f0c568a4SJianyun Li #include <linux/delay.h> 3436f8ef7fSTina Ruchandani #include <linux/ktime.h> 35f0c568a4SJianyun Li #include <linux/blkdev.h> 36f0c568a4SJianyun Li #include <linux/io.h> 37f0c568a4SJianyun Li #include <scsi/scsi.h> 38f0c568a4SJianyun Li #include <scsi/scsi_cmnd.h> 
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_eh.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>

#include "mvumi.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("jyli@marvell.com");
MODULE_DESCRIPTION("Marvell UMI Driver");

/* PCI IDs this driver claims: Marvell MV9143 and MV9580 controllers. */
static const struct pci_device_id mvumi_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, mvumi_pci_table);

/*
 * tag_init - mark every tag in [0, size) as free.
 *
 * The free tags are kept as a stack; filling it as size-1-i puts tag 0 on
 * top so it is handed out first.  The BUG_ON enforces that the caller sized
 * st->stack to exactly 'size' entries when the pool was created.
 */
static void tag_init(struct mvumi_tag *st, unsigned short size)
{
	unsigned short i;
	BUG_ON(size != st->size);
	st->top = size;
	for (i = 0; i < size; i++)
		st->stack[i] = size - 1 - i;
}

/* Pop one free tag off the stack; caller must have checked it is non-empty. */
static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
{
	BUG_ON(st->top <= 0);
	return st->stack[--st->top];
}

/* Return a tag to the free stack; BUG if the stack would overflow. */
static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
						unsigned short tag)
{
	BUG_ON(st->top >= st->size);
	st->stack[st->top++] = tag;
}

/* True (1) when no free tags remain in the pool. */
static bool tag_is_empty(struct mvumi_tag *st)
{
	if (st->top == 0)
		return 1;
	else
		return 0;
}

/* Undo mvumi_map_pci_addr(): iounmap every memory BAR that was mapped. */
static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++)
		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
								addr_array[i])
			pci_iounmap(dev, addr_array[i]);
}

/*
 * Map each memory BAR of the device into addr_array[i]; non-memory BARs
 * get a NULL slot.  On any mapping failure all BARs mapped so far are
 * unwound and -ENOMEM is returned; 0 on success.
 */
static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
			addr_array[i] = pci_iomap(dev, i, 0);
			if (!addr_array[i]) {
				dev_err(&dev->dev, "failed to map Bar[%d]\n",
									i);
				mvumi_unmap_pci_addr(dev, addr_array);
				return -ENOMEM;
			}
		} else
			addr_array[i] = NULL;

		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
	}

	return 0;
}

/*
 * Allocate a tracked memory resource and append it to mhba->res_list so
 * mvumi_release_mem_resource() can free everything in one pass.
 *
 * RESOURCE_CACHED_MEMORY comes from the slab (GFP_ATOMIC: may be called
 * from non-sleeping context); RESOURCE_UNCACHED_MEMORY is DMA-coherent
 * memory with the size rounded up to 8 bytes.  Returns NULL on failure.
 */
static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
				enum resource_type type, unsigned int size)
{
	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res) {
		dev_err(&mhba->pdev->dev,
			"Failed to allocate memory for resource manager.\n");
		return NULL;
	}

	switch (type) {
	case RESOURCE_CACHED_MEMORY:
		res->virt_addr = kzalloc(size, GFP_ATOMIC);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate memory,size = %d.\n", size);
			kfree(res);
			return NULL;
		}
		break;

	case RESOURCE_UNCACHED_MEMORY:
		size = round_up(size, 8);
		res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size,
				&res->bus_addr, GFP_KERNEL);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate consistent mem,"
							"size = %d.\n", size);
			kfree(res);
			return NULL;
		}
		break;

	default:
		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
		kfree(res);
		return NULL;
	}

	res->type = type;
	res->size = size;
	INIT_LIST_HEAD(&res->entry);
	list_add_tail(&res->entry, &mhba->res_list);

	return res;
}

/* Free every resource queued on mhba->res_list, then clear MVUMI_FW_ALLOC. */
static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
{
	struct mvumi_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
		switch (res->type) {
		case RESOURCE_UNCACHED_MEMORY:
			dma_free_coherent(&mhba->pdev->dev, res->size,
						res->virt_addr, res->bus_addr);
			break;
		case RESOURCE_CACHED_MEMORY:
			kfree(res->virt_addr);
			break;
		default:
			dev_err(&mhba->pdev->dev,
				"unknown resource type %d\n", res->type);
			break;
		}
		list_del(&res->entry);
		kfree(res);
	}
	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
}

/**
 * mvumi_make_sgl -	Prepares  SGL
 * @mhba:		Adapter soft state
 * @scmd:		SCSI command from the mid-layer
 * @sgl_p:		SGL to be filled in
 * @sg_count		return the number of SG elements
 *
 * If successful, this function returns 0. otherwise, it returns -1.
 */
static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
					void *sgl_p, unsigned char *sg_count)
{
	struct scatterlist *sg;
	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
	unsigned int i;
	unsigned int sgnum = scsi_sg_count(scmd);
	dma_addr_t busaddr;

	sg = scsi_sglist(scmd);
	/* DMA-map the mid-layer's scatterlist; unmap again on any error. */
	*sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
						scmd->sc_data_direction);
	if (*sg_count > mhba->max_sge) {
		dev_err(&mhba->pdev->dev,
			"sg count[0x%x] is bigger than max sg[0x%x].\n",
			*sg_count, mhba->max_sge);
		dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
						scmd->sc_data_direction);
		return -1;
	}
	for (i = 0; i < *sg_count; i++) {
		busaddr = sg_dma_address(&sg[i]);
		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
		m_sg->flags = 0;
		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
		/* Mark the last descriptor with the end-of-table flag. */
		if ((i + 1) == *sg_count)
			m_sg->flags |= 1U << mhba->eot_flag;

		sgd_inc(mhba, m_sg);
	}

	return 0;
}

/*
 * Attach a single-entry SGL backed by a freshly allocated DMA-coherent
 * buffer of 'size' bytes to an internal command frame.  The buffer pointer
 * is kept in cmd->data_buf; its bus address lives only inside the SGL
 * entry (mvumi_delete_internal_cmd() reconstructs it from there).
 * Returns 0 on success, -1 on allocation failure; size 0 is a no-op.
 */
static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
							unsigned int size)
{
	struct mvumi_sgl *m_sg;
	void *virt_addr;
	dma_addr_t phy_addr;

	if (size == 0)
		return 0;

	virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr,
			GFP_KERNEL);
	if (!virt_addr)
		return -1;

	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
	cmd->frame->sg_counts = 1;
	cmd->data_buf = virt_addr;

	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
	m_sg->flags = 1U << mhba->eot_flag;
	sgd_setsz(mhba, m_sg, cpu_to_le32(size));

	return 0;
}

/*
 * Allocate an internal (driver-originated, not mid-layer) command with a
 * DMA-coherent firmware frame and, when buf_size != 0, a data buffer hung
 * off a one-entry SGL.  Returns NULL on any allocation failure, with all
 * partial allocations undone.  Freed by mvumi_delete_internal_cmd().
 */
static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
							unsigned int buf_size)
{
	struct mvumi_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n");
		return NULL;
	}
	INIT_LIST_HEAD(&cmd->queue_pointer);

	cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
			&cmd->frame_phys, GFP_KERNEL);
	if (!cmd->frame) {
		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
			" frame,size = %d.\n", mhba->ib_max_size);
		kfree(cmd);
		return NULL;
	}

	if (buf_size) {
		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
			dev_err(&mhba->pdev->dev, "failed to allocate memory"
						" for internal frame\n");
			dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
					cmd->frame, cmd->frame_phys);
			kfree(cmd);
			return NULL;
		}
	} else
		cmd->frame->sg_counts = 0;

	return cmd;
}

/*
 * Free an internal command created by mvumi_create_internal_cmd().
 *
 * The data buffer's bus address is rebuilt from the SGL entry's two 32-bit
 * halves (the (h << 16) << 16 dance avoids an undefined 32-bit shift when
 * dma_addr_t is only 32 bits wide).
 */
static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	struct mvumi_sgl *m_sg;
	unsigned int size;
	dma_addr_t phy_addr;

	if (cmd && cmd->frame) {
		if (cmd->frame->sg_counts) {
			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
			sgd_getsz(mhba, m_sg, size);

			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);

			dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
								phy_addr);
		}
		dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
				cmd->frame, cmd->frame_phys);
		kfree(cmd);
	}
}

/**
 * mvumi_get_cmd -	Get a command from the free pool
 * @mhba:
 *	Adapter soft state
 *
 * Returns a free command from the pool
 */
static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd = NULL;

	if (likely(!list_empty(&mhba->cmd_pool))) {
		cmd = list_entry((&mhba->cmd_pool)->next,
				struct mvumi_cmd, queue_pointer);
		/* del_init so a later list_add_tail on return is safe */
		list_del_init(&cmd->queue_pointer);
	} else
		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");

	return cmd;
}

/**
 * mvumi_return_cmd -	Return a cmd to free command pool
 * @mhba:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 */
static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	cmd->scmd = NULL;
	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
}

/**
 * mvumi_free_cmds -	Free all the cmds in the free cmd pool
 * @mhba:		Adapter soft state
 *
 * With HS_CAPABILITY_SUPPORT_DYN_SRC the frames live inside the shared
 * ib_frame area and must not be freed individually.
 */
static void mvumi_free_cmds(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;

	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
							queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
}

/**
 * mvumi_alloc_cmds -	Allocates the command packets
 * @mhba:		Adapter soft state
 *
 * Allocates mhba->max_io commands onto cmd_pool.  Frames either point into
 * the preallocated ib_frame region (dynamic-source capability) or are
 * allocated per command.  On failure the pool is torn down and -ENOMEM
 * returned.
 */
static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
{
	int i;
	struct mvumi_cmd *cmd;

	for (i = 0; i < mhba->max_io; i++) {
		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
		if (!cmd)
			goto err_exit;

		INIT_LIST_HEAD(&cmd->queue_pointer);
		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
			cmd->frame_phys = mhba->ib_frame_phys
						+ i * mhba->ib_max_size;
		} else
			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
		if (!cmd->frame)
			goto err_exit;
	}
	return 0;

err_exit:
	dev_err(&mhba->pdev->dev,
			"failed to allocate memory for cmd[0x%x].\n", i);
	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
						queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
	return -ENOMEM;
}

/*
 * 9143: number of free inbound-list slots, or 0 when the list is full.
 * The list is full when the hardware read pointer has the same slot number
 * as our current slot but the opposite wrap toggle bit.
 */
static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
{
	unsigned int ib_rp_reg;
	struct mvumi_hw_regs *regs = mhba->regs;

	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);

	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
			((ib_rp_reg & regs->cl_pointer_toggle)
			!= (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
		return 0;
	}
	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
		return 0;
	} else {
		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
	}
}

/*
 * 9580: free inbound slots come from the ib_shadow register directly;
 * 0xffff means "not ready".  One slot is always kept in reserve.
 */
static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
{
	unsigned int count;
	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
		return 0;
	count = ioread32(mhba->ib_shadow);
	if (count == 0xffff)
		return 0;
	return count;
}

/*
 * Advance ib_cur_slot (slot number plus wrap-toggle bit) by one and return
 * the virtual address of the next inbound list entry in *ib_entry.
 * Caller must already have verified a slot is free via check_ib_list.
 */
static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
{
	unsigned int cur_ib_entry;

	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
	cur_ib_entry++;
	if (cur_ib_entry >= mhba->list_num_io) {
		cur_ib_entry -= mhba->list_num_io;
		/* wrapped: flip the toggle bit so HW can detect full/empty */
		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
	}
	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		*ib_entry = mhba->ib_list + cur_ib_entry *
				sizeof(struct mvumi_dyn_list_entry);
	} else {
		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
	}
	atomic_inc(&mhba->fw_outstanding);
}

/* Kick the firmware: invalidate the shadow and publish the write pointer. */
static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
{
	iowrite32(0xffff, mhba->ib_shadow);
	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
}

/*
 * Re-read and validate an outbound frame whose first read looked stale
 * (copy pointer can lead valid data).  Returns 0 when the frame's tag and
 * request id check out, -1 otherwise.
 */
static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
{
	unsigned short tag, request_id;

	/* brief settle delay before re-reading the entry */
	udelay(1);
	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
	request_id = p_outb_frame->request_id;
	tag = p_outb_frame->tag;
	if (tag > mhba->tag_pool.size) {
		dev_err(&mhba->pdev->dev, "ob frame data error\n");
		return -1;
	}
	if (mhba->tag_cmd[tag] == NULL) {
		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
		return -1;
	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
						mhba->request_id_enabled) {
			dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
					"cmd request ID:0x%x\n", request_id,
					mhba->tag_cmd[tag]->request_id);
			return -1;
	}

	return 0;
}

/*
 * 9143: compute the window [cur_obf, assign_obf_end) of completed outbound
 * entries.  Spins until the copy pointer and its shadow agree (i.e. the HW
 * has finished updating).  A toggle-bit mismatch means the window wraps, so
 * the end index is biased by list_num_io.  Always returns 0.
 */
static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write, ob_write_shadow;
	struct mvumi_hw_regs *regs = mhba->regs;

	do {
		ob_write = ioread32(regs->outb_copy_pointer);
		ob_write_shadow = ioread32(mhba->ob_shadow);
	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);

	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;

	if ((ob_write & regs->cl_pointer_toggle) !=
			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
		*assign_obf_end += mhba->list_num_io;
	}
	return 0;
}

/*
 * 9580 variant of the outbound-window computation.  Returns -1 when no new
 * entries are available.
 *
 * NOTE(review): outb_read_pointer is read and its value immediately
 * overwritten by the outb_copy_pointer read — presumably a required dummy
 * MMIO read to order/flush before sampling the copy pointer; confirm
 * against the controller spec before "simplifying" it away.
 */
static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write;
	struct mvumi_hw_regs *regs = mhba->regs;

	ob_write = ioread32(regs->outb_read_pointer);
	ob_write = ioread32(regs->outb_copy_pointer);
	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
	if (*assign_obf_end < *cur_obf)
		*assign_obf_end += mhba->list_num_io;
	else if (*assign_obf_end == *cur_obf)
		return -1;
	return 0;
}

/*
 * Drain completed entries from the outbound list into free_ob_list buffers.
 * Each entry is copied out of the shared ring into an ob_data buffer and the
 * ring slot poisoned (tag = 0xff).  If we run out of ob_data buffers the
 * slot index is stepped back one (undoing the toggle flip if needed) so the
 * remaining entries are picked up next time.  Finally the consumed position
 * is published back to the hardware via outb_read_pointer.
 */
static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
{
	unsigned int cur_obf, assign_obf_end, i;
	struct mvumi_ob_data *ob_data;
	struct mvumi_rsp_frame *p_outb_frame;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
		return;

	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
		cur_obf++;
		if (cur_obf >= mhba->list_num_io) {
			cur_obf -= mhba->list_num_io;
			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
		}

		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;

		/* Copy pointer may point to entry in outbound list
		 * before entry has valid data
		 */
		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
				mhba->tag_cmd[p_outb_frame->tag] == NULL ||
				p_outb_frame->request_id !=
				mhba->tag_cmd[p_outb_frame->tag]->request_id))
			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
				continue;

		if (!list_empty(&mhba->ob_data_list)) {
			ob_data = (struct mvumi_ob_data *)
				list_first_entry(&mhba->ob_data_list,
					struct mvumi_ob_data, list);
			list_del_init(&ob_data->list);
		} else {
			ob_data = NULL;
			if (cur_obf == 0) {
				cur_obf = mhba->list_num_io - 1;
				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
			} else
				cur_obf -= 1;
			break;
		}

		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
		p_outb_frame->tag = 0xff;

		list_add_tail(&ob_data->list, &mhba->free_ob_list);
	}
	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
}

/*
 * Request a firmware soft reset via the doorbell, but only if the firmware
 * reports the handshake-done state.
 */
static void mvumi_reset(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;

	iowrite32(0, regs->enpointa_mask_reg);
	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
		return;

	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
}

static unsigned char mvumi_start(struct mvumi_hba *mhba);

/*
 * Abort outstanding I/O by soft-resetting the firmware and restarting it.
 * Returns SUCCESS/FAILED for the SCSI error-handling path.
 */
static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;
	mvumi_reset(mhba);

	if (mvumi_start(mhba))
		return FAILED;
	else
		return SUCCESS;
}

/*
 * Poll (with MU-reset doorbell kicks every 500ms) until the firmware
 * reports the handshake-ready state, giving up after FW_MAX_DELAY seconds.
 */
static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;
	u32 tmp;
	unsigned long before;
	before = jiffies;

	iowrite32(0, regs->enpointa_mask_reg);
	tmp = ioread32(regs->arm_to_pciea_msg1);
	while (tmp != HANDSHAKE_READYSTATE) {
		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"FW reset failed [0x%x].\n", tmp);
			return FAILED;
		}

		msleep(500);
		rmb();
		tmp = ioread32(regs->arm_to_pciea_msg1);
	}

	return SUCCESS;
}

/* Save the raw BAR registers (config offsets 0x10+) before a chip reset. */
static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
						&mhba->pci_base[i]);
	}
}

/* Restore the BAR registers saved by mvumi_backup_bar_addr(). */
static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (mhba->pci_base[i])
			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
						mhba->pci_base[i]);
	}
}

/*
 * mvumi_pci_set_master - enable bus mastering and pick a DMA mask.
 *
 * Prefers a 64-bit DMA mask when the build supports it (IS_DMA64),
 * falling back to 32-bit if the 64-bit mask is rejected.
 *
 * Returns 0 on success or the error from dma_set_mask().
 */
static int mvumi_pci_set_master(struct pci_dev *pdev)
{
	int ret = 0;

	pci_set_master(pdev);

	if (IS_DMA64) {
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	} else
		ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));

	return ret;
}

/*
 * mvumi_reset_host_9580 - full chip reset for the MV9580.
 *
 * Sequence: abort firmware state, pulse the reset request registers,
 * cycle the PCI device, re-enable bus mastering/DMA, restore the BARs
 * clobbered by the reset, wait for the firmware to come back to READY,
 * then drain outstanding commands.
 *
 * Returns SUCCESS/FAILED (SCSI EH convention).
 */
static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;

	iowrite32(0, mhba->regs->reset_enable);
	iowrite32(0xf, mhba->regs->reset_request);

	iowrite32(0x10, mhba->regs->reset_enable);
	iowrite32(0x10, mhba->regs->reset_request);
	msleep(100);
	pci_disable_device(mhba->pdev);

	if (pci_enable_device(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "enable device failed\n");
		return FAILED;
	}
	if (mvumi_pci_set_master(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "set master failed\n");
		return FAILED;
	}
	mvumi_restore_bar_addr(mhba);
	if (mvumi_wait_for_fw(mhba) == FAILED)
		return FAILED;

	return mvumi_wait_for_outstanding(mhba);
}

/*
 * mvumi_reset_host_9143 - MV9143 reset path: no chip reset needed, just
 * wait for (or fail over) the outstanding commands.
 */
static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
{
	return mvumi_wait_for_outstanding(mhba);
}

/*
 * mvumi_host_reset - SCSI error-handler host reset entry point.
 * Dispatches to the chip-specific reset_host method.
 */
static int mvumi_host_reset(struct scsi_cmnd *scmd)
{
	struct mvumi_hba *mhba;

	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;

	scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
			scmd->serial_number, scmd->cmnd[0], scmd->retries);

	return mhba->instancet->reset_host(mhba);
}

/*
 * mvumi_issue_blocked_cmd - fire an internal (driver-generated) command
 * and sleep until it completes or times out.
 *
 * sync_cmd acts as an in-flight marker: it must be 0 on entry (only one
 * blocked internal command at a time) and is decremented by the
 * completion path, which also wakes int_cmd_wait_q.  On timeout the
 * command's tag and list membership are cleaned up under the host lock.
 *
 * Returns 0; the caller inspects cmd->cmd_status for the outcome.
 */
static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	unsigned long flags;

	cmd->cmd_status = REQ_STATUS_PENDING;

	if (atomic_read(&cmd->sync_cmd)) {
		dev_err(&mhba->pdev->dev,
			"last blocked cmd not finished, sync_cmd = %d\n",
						atomic_read(&cmd->sync_cmd));
		BUG_ON(1);
		return -1;
	}
	atomic_inc(&cmd->sync_cmd);
	spin_lock_irqsave(mhba->shost->host_lock, flags);
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	wait_event_timeout(mhba->int_cmd_wait_q,
		(cmd->cmd_status != REQ_STATUS_PENDING),
		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);

	/* command timeout */
	if (atomic_read(&cmd->sync_cmd)) {
		/* still marked in-flight: completion never ran */
		spin_lock_irqsave(mhba->shost->host_lock, flags);
		atomic_dec(&cmd->sync_cmd);
753f0c568a4SJianyun Li if (mhba->tag_cmd[cmd->frame->tag]) { 754f0c568a4SJianyun Li mhba->tag_cmd[cmd->frame->tag] = 0; 755f0c568a4SJianyun Li dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n", 756f0c568a4SJianyun Li cmd->frame->tag); 757f0c568a4SJianyun Li tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag); 758f0c568a4SJianyun Li } 759f0c568a4SJianyun Li if (!list_empty(&cmd->queue_pointer)) { 760f0c568a4SJianyun Li dev_warn(&mhba->pdev->dev, 761f0c568a4SJianyun Li "TIMEOUT:A internal command doesn't send!\n"); 762f0c568a4SJianyun Li list_del_init(&cmd->queue_pointer); 763f0c568a4SJianyun Li } else 764f0c568a4SJianyun Li atomic_dec(&mhba->fw_outstanding); 765f0c568a4SJianyun Li 766f0c568a4SJianyun Li spin_unlock_irqrestore(mhba->shost->host_lock, flags); 767f0c568a4SJianyun Li } 768f0c568a4SJianyun Li return 0; 769f0c568a4SJianyun Li } 770f0c568a4SJianyun Li 771f0c568a4SJianyun Li static void mvumi_release_fw(struct mvumi_hba *mhba) 772f0c568a4SJianyun Li { 773f0c568a4SJianyun Li mvumi_free_cmds(mhba); 774f0c568a4SJianyun Li mvumi_release_mem_resource(mhba); 775f0c568a4SJianyun Li mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); 776*ab8e7f4bSChristoph Hellwig dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE, 777bd756ddeSShun Fu mhba->handshake_page, mhba->handshake_page_phys); 778bd756ddeSShun Fu kfree(mhba->regs); 779f0c568a4SJianyun Li pci_release_regions(mhba->pdev); 780f0c568a4SJianyun Li } 781f0c568a4SJianyun Li 782f0c568a4SJianyun Li static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba) 783f0c568a4SJianyun Li { 784f0c568a4SJianyun Li struct mvumi_cmd *cmd; 785f0c568a4SJianyun Li struct mvumi_msg_frame *frame; 786f0c568a4SJianyun Li unsigned char device_id, retry = 0; 787f0c568a4SJianyun Li unsigned char bitcount = sizeof(unsigned char) * 8; 788f0c568a4SJianyun Li 789f0c568a4SJianyun Li for (device_id = 0; device_id < mhba->max_target_id; device_id++) { 790f0c568a4SJianyun Li if (!(mhba->target_map[device_id / bitcount] & 
791f0c568a4SJianyun Li (1 << (device_id % bitcount)))) 792f0c568a4SJianyun Li continue; 793f0c568a4SJianyun Li get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0); 794f0c568a4SJianyun Li if (!cmd) { 795f0c568a4SJianyun Li if (retry++ >= 5) { 796f0c568a4SJianyun Li dev_err(&mhba->pdev->dev, "failed to get memory" 797f0c568a4SJianyun Li " for internal flush cache cmd for " 798f0c568a4SJianyun Li "device %d", device_id); 799f0c568a4SJianyun Li retry = 0; 800f0c568a4SJianyun Li continue; 801f0c568a4SJianyun Li } else 802f0c568a4SJianyun Li goto get_cmd; 803f0c568a4SJianyun Li } 804f0c568a4SJianyun Li cmd->scmd = NULL; 805f0c568a4SJianyun Li cmd->cmd_status = REQ_STATUS_PENDING; 806f0c568a4SJianyun Li atomic_set(&cmd->sync_cmd, 0); 807f0c568a4SJianyun Li frame = cmd->frame; 808f0c568a4SJianyun Li frame->req_function = CL_FUN_SCSI_CMD; 809f0c568a4SJianyun Li frame->device_id = device_id; 810f0c568a4SJianyun Li frame->cmd_flag = CMD_FLAG_NON_DATA; 811f0c568a4SJianyun Li frame->data_transfer_length = 0; 812f0c568a4SJianyun Li frame->cdb_length = MAX_COMMAND_SIZE; 813f0c568a4SJianyun Li memset(frame->cdb, 0, MAX_COMMAND_SIZE); 814f0c568a4SJianyun Li frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC; 815bd756ddeSShun Fu frame->cdb[1] = CDB_CORE_MODULE; 816f0c568a4SJianyun Li frame->cdb[2] = CDB_CORE_SHUTDOWN; 817f0c568a4SJianyun Li 818f0c568a4SJianyun Li mvumi_issue_blocked_cmd(mhba, cmd); 819f0c568a4SJianyun Li if (cmd->cmd_status != SAM_STAT_GOOD) { 820f0c568a4SJianyun Li dev_err(&mhba->pdev->dev, 821f0c568a4SJianyun Li "device %d flush cache failed, status=0x%x.\n", 822f0c568a4SJianyun Li device_id, cmd->cmd_status); 823f0c568a4SJianyun Li } 824f0c568a4SJianyun Li 825f0c568a4SJianyun Li mvumi_delete_internal_cmd(mhba, cmd); 826f0c568a4SJianyun Li } 827f0c568a4SJianyun Li return 0; 828f0c568a4SJianyun Li } 829f0c568a4SJianyun Li 830f0c568a4SJianyun Li static unsigned char 831f0c568a4SJianyun Li mvumi_calculate_checksum(struct mvumi_hs_header *p_header, 832f0c568a4SJianyun Li 
unsigned short len) 833f0c568a4SJianyun Li { 834f0c568a4SJianyun Li unsigned char *ptr; 835f0c568a4SJianyun Li unsigned char ret = 0, i; 836f0c568a4SJianyun Li 837f0c568a4SJianyun Li ptr = (unsigned char *) p_header->frame_content; 838f0c568a4SJianyun Li for (i = 0; i < len; i++) { 839f0c568a4SJianyun Li ret ^= *ptr; 840f0c568a4SJianyun Li ptr++; 841f0c568a4SJianyun Li } 842f0c568a4SJianyun Li 843f0c568a4SJianyun Li return ret; 844f0c568a4SJianyun Li } 845f0c568a4SJianyun Li 846bd756ddeSShun Fu static void mvumi_hs_build_page(struct mvumi_hba *mhba, 847f0c568a4SJianyun Li struct mvumi_hs_header *hs_header) 848f0c568a4SJianyun Li { 849f0c568a4SJianyun Li struct mvumi_hs_page2 *hs_page2; 850f0c568a4SJianyun Li struct mvumi_hs_page4 *hs_page4; 851f0c568a4SJianyun Li struct mvumi_hs_page3 *hs_page3; 85236f8ef7fSTina Ruchandani u64 time; 85336f8ef7fSTina Ruchandani u64 local_time; 854f0c568a4SJianyun Li 855f0c568a4SJianyun Li switch (hs_header->page_code) { 856f0c568a4SJianyun Li case HS_PAGE_HOST_INFO: 857f0c568a4SJianyun Li hs_page2 = (struct mvumi_hs_page2 *) hs_header; 858f0c568a4SJianyun Li hs_header->frame_length = sizeof(*hs_page2) - 4; 859f0c568a4SJianyun Li memset(hs_header->frame_content, 0, hs_header->frame_length); 860f0c568a4SJianyun Li hs_page2->host_type = 3; /* 3 mean linux*/ 861bd756ddeSShun Fu if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) 862bd756ddeSShun Fu hs_page2->host_cap = 0x08;/* host dynamic source mode */ 863f0c568a4SJianyun Li hs_page2->host_ver.ver_major = VER_MAJOR; 864f0c568a4SJianyun Li hs_page2->host_ver.ver_minor = VER_MINOR; 865f0c568a4SJianyun Li hs_page2->host_ver.ver_oem = VER_OEM; 866f0c568a4SJianyun Li hs_page2->host_ver.ver_build = VER_BUILD; 867f0c568a4SJianyun Li hs_page2->system_io_bus = 0; 868f0c568a4SJianyun Li hs_page2->slot_number = 0; 869f0c568a4SJianyun Li hs_page2->intr_level = 0; 870f0c568a4SJianyun Li hs_page2->intr_vector = 0; 87136f8ef7fSTina Ruchandani time = ktime_get_real_seconds(); 87236f8ef7fSTina 
		local_time = (time - (sys_tz.tz_minuteswest * 60));
		hs_page2->seconds_since1970 = local_time;
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_FIRM_CTL:
		/* firmware-control page carries no host data; send zeros */
		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page3) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_CL_INFO:
		/* describe the inbound/outbound communication lists */
		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page4) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);

		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
		if (mhba->hba_capability
			& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
			/* new-style definition: depth is expressed as a
			 * power-of-two exponent (position of set bit) */
			hs_page4->ob_depth = find_first_bit((unsigned long *)
							    &mhba->list_num_io,
							    BITS_PER_LONG);
			hs_page4->ib_depth = find_first_bit((unsigned long *)
							    &mhba->list_num_io,
							    BITS_PER_LONG);
		} else {
			hs_page4->ob_depth = (u8) mhba->list_num_io;
			hs_page4->ib_depth = (u8) mhba->list_num_io;
		}
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	default:
		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
			hs_header->page_code);
		break;
	}
}

/**
 * mvumi_init_data -	Initialize requested date for FW
 * @mhba: Adapter soft state
 *
 * Carves a single uncached DMA allocation into the inbound list
 * (optionally preceded by dynamic-source entries), the ib/ob shadow
 * words and the outbound list, each with its required alignment, then
 * allocates cached memory for the outbound data pool and for the tag
 * stack / tag->cmd table / target bitmap.  Idempotent: a second call is
 * a no-op once MVUMI_FW_ALLOC is set.
 *
 * Returns 0 on success, -1 on allocation failure (all partial
 * allocations are released).
 */
static int mvumi_init_data(struct mvumi_hba *mhba)
{
	struct mvumi_ob_data *ob_pool;
	struct mvumi_res *res_mgnt;
	unsigned int tmp_size, offset, i;
	void *virmem, *v;
	dma_addr_t p;

	if (mhba->fw_flag & MVUMI_FW_ALLOC)
		return 0;

	/* worst-case size: lists plus alignment padding and shadow words */
	tmp_size = mhba->ib_max_size * mhba->max_io;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
		tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;

	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
	tmp_size += 8 + sizeof(u32)*2 + 16;

	res_mgnt = mvumi_alloc_mem_resource(mhba,
					RESOURCE_UNCACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for inbound list\n");
		goto fail_alloc_dma_buf;
	}

	/* walk the bus/virtual addresses in lockstep while carving */
	p = res_mgnt->bus_addr;
	v = res_mgnt->virt_addr;
	/* ib_list */
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;
	mhba->ib_list = v;
	mhba->ib_list_phys = p;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		/* dynamic-source entries precede the actual frames */
		v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		mhba->ib_frame = v;
		mhba->ib_frame_phys = p;
	}
	v += mhba->ib_max_size * mhba->max_io;
	p += mhba->ib_max_size * mhba->max_io;

	/* ib shadow */
	offset = round_up(p, 8) - p;
	p += offset;
	v += offset;
	mhba->ib_shadow = v;
	mhba->ib_shadow_phys = p;
	p += sizeof(u32)*2;
	v += sizeof(u32)*2;
	/* ob shadow: 8 bytes on MV9580, 4 bytes on other chips */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
		offset = round_up(p, 8) - p;
		p += offset;
		v += offset;
		mhba->ob_shadow = v;
		mhba->ob_shadow_phys = p;
		p += 8;
		v += 8;
	} else {
		offset = round_up(p, 4) - p;
		p += offset;
		v += offset;
		mhba->ob_shadow = v;
		mhba->ob_shadow_phys = p;
		p += 4;
		v += 4;
	}

	/* ob list */
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;

	mhba->ob_list = v;
	mhba->ob_list_phys = p;

	/* ob data pool */
	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
	tmp_size = round_up(tmp_size, 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
				RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for outbound data buffer\n");
		goto fail_alloc_dma_buf;
	}
	virmem = res_mgnt->virt_addr;

	/* thread one mvumi_ob_data node per possible I/O onto the free list */
	for (i = mhba->max_io; i != 0; i--) {
		ob_pool = (struct mvumi_ob_data *) virmem;
		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
		virmem += mhba->ob_max_size + sizeof(*ob_pool);
	}

	/* tag stack + tag->cmd table + target bitmap share one allocation */
	tmp_size = sizeof(unsigned short) * mhba->max_io +
				sizeof(struct mvumi_cmd *) * mhba->max_io;
	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
						(sizeof(unsigned char) * 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
			RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for tag and target map\n");
		goto fail_alloc_dma_buf;
	}

	virmem = res_mgnt->virt_addr;
	mhba->tag_pool.stack = virmem;
	mhba->tag_pool.size = mhba->max_io;
	tag_init(&mhba->tag_pool, mhba->max_io);
	virmem += sizeof(unsigned short) * mhba->max_io;

	mhba->tag_cmd = virmem;
	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;

	mhba->target_map = virmem;

	mhba->fw_flag |= MVUMI_FW_ALLOC;
	return 0;

fail_alloc_dma_buf:
	mvumi_release_mem_resource(mhba);
	return -1;
}

/*
 * mvumi_hs_process_page - validate and consume a handshake page sent by
 * the firmware.  Only the firmware-capability page (HS_PAGE_FIRM_CAP)
 * is expected; it populates the adapter limits used by mvumi_init_data.
 *
 * Returns 0 on success, -1 on checksum mismatch or unknown page code.
 */
static int mvumi_hs_process_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page1 *hs_page1;
	unsigned char page_checksum;

	page_checksum = mvumi_calculate_checksum(hs_header,
				hs_header->frame_length);
	if (page_checksum != hs_header->checksum) {
		dev_err(&mhba->pdev->dev, "checksum error\n");
		return -1;
	}

	switch (hs_header->page_code) {
	case HS_PAGE_FIRM_CAP:
		hs_page1 = (struct mvumi_hs_page1 *) hs_header;

		mhba->max_io = hs_page1->max_io_support;
		mhba->list_num_io = hs_page1->cl_inout_list_depth;
		mhba->max_transfer_size = hs_page1->max_transfer_size;
		mhba->max_target_id = hs_page1->max_devices_support;
		mhba->hba_capability = hs_page1->capability;
		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
		/* entry sizes are encoded as exponents of 4-byte units */
		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;

		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;

		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
						hs_page1->fw_ver.ver_build);

		/* end-of-transfer flag bit position depends on SG format */
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
			mhba->eot_flag = 22;
		else
			mhba->eot_flag = 27;
		if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
			mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
		break;
	default:
		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
		return -1;
	}
	return 0;
}

/**
 * mvumi_handshake -	Move the FW to READY state
 * @mhba: Adapter soft state
 *
 * During the initialization, FW passes can potentially be in any one of
 * several possible states. If the FW in operational, waiting-for-handshake
 * states, driver must take steps to bring it to ready state. Otherwise, it
 * has to wait for the ready state.
 *
 * Runs one step of the handshake state machine; each invocation reads
 * the firmware-reported state from arm_to_pciea_msg0 (except the very
 * first step), acts on it, and rings DRBL_HANDSHAKE to hand control
 * back to the firmware.  Returns 0 on a successful step, -1 on error.
 */
static int mvumi_handshake(struct mvumi_hba *mhba)
{
	unsigned int hs_state, tmp, hs_fun;
	struct mvumi_hs_header *hs_header;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->fw_state == FW_STATE_STARTING)
		hs_state = HS_S_START;
	else {
		tmp = ioread32(regs->arm_to_pciea_msg0);
		hs_state = HS_GET_STATE(tmp);
		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
			/* firmware reported an error: restart the handshake */
			mhba->fw_state = FW_STATE_STARTING;
			return -1;
		}
	}

	hs_fun = 0;
	switch (hs_state) {
	case HS_S_START:
		/* announce ourselves and request a firmware reset */
		mhba->fw_state = FW_STATE_HANDSHAKING;
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_RESET);
		iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_RESET:
		/* hand the firmware the DMA address of the handshake page.
		 * NOTE(review): the upper 32 bits go to arm_to_pciea_msg1
		 * while the lower go to pciea_to_arm_msg1 — asymmetric;
		 * presumably intentional per the firmware protocol, but
		 * worth confirming against the handshake spec. */
		iowrite32(lower_32_bits(mhba->handshake_page_phys),
					regs->pciea_to_arm_msg1);
		iowrite32(upper_32_bits(mhba->handshake_page_phys),
					regs->arm_to_pciea_msg1);
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_PAGE_ADDR:
	case HS_S_QUERY_PAGE:
	case HS_S_SEND_PAGE:
		/* page-exchange phase: the handshake page is shared memory */
		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
			mhba->hba_total_pages =
			((struct mvumi_hs_page1 *) hs_header)->total_pages;

			if (mhba->hba_total_pages == 0)
				mhba->hba_total_pages = HS_PAGE_TOTAL-1;
		}

		if (hs_state == HS_S_QUERY_PAGE) {
			/* firmware filled the page: parse it and size our
			 * DMA structures accordingly */
			if (mvumi_hs_process_page(mhba, hs_header)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
			if (mvumi_init_data(mhba)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
		} else if (hs_state == HS_S_PAGE_ADDR) {
			hs_header->page_code = 0;
			mhba->hba_total_pages = HS_PAGE_TOTAL-1;
		}

		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
			/* advance to the next page; FIRM_CAP is queried
			 * from the firmware, all others are sent by us */
			hs_header->page_code++;
			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
				mvumi_hs_build_page(mhba, hs_header);
				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
			} else
				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
		} else
			HS_SET_STATE(hs_fun, HS_S_END);

		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_END:
		/* Set communication list ISR */
		tmp = ioread32(regs->enpointa_mask_reg);
		tmp |= regs->int_comaout | regs->int_comaerr;
		iowrite32(tmp, regs->enpointa_mask_reg);
		iowrite32(mhba->list_num_io, mhba->ib_shadow);
		/* Set InBound List Available count shadow */
		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_basel);
		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_baseh);

		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
			/* Set OutBound List Available count shadow */
			iowrite32((mhba->list_num_io-1) |
							regs->cl_pointer_toggle,
							mhba->ob_shadow);
			iowrite32(lower_32_bits(mhba->ob_shadow_phys),
							regs->outb_copy_basel);
			iowrite32(upper_32_bits(mhba->ob_shadow_phys),
							regs->outb_copy_baseh);
		}

		/* start both ring cursors at the last slot with the
		 * toggle bit set so the first advance wraps to slot 0 */
		mhba->ib_cur_slot = (mhba->list_num_io - 1) |
						regs->cl_pointer_toggle;
		mhba->ob_cur_slot = (mhba->list_num_io - 1) |
						regs->cl_pointer_toggle;
		mhba->fw_state = FW_STATE_STARTED;

		break;
	default:
		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
								hs_state);
		return -1;
	}
	return 0;
}

/*
 * mvumi_handshake_event - drive one handshake step and wait for the
 * firmware to raise the handshake doorbell interrupt (or for the
 * handshake to finish), polling for up to FW_MAX_DELAY seconds.
 *
 * Returns 0 on progress/completion, (unsigned char)-1 on timeout —
 * callers only test for non-zero.
 */
static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
{
	unsigned int isr_status;
	unsigned long before;

	before = jiffies;
	mvumi_handshake(mhba);
	do {
		isr_status = mhba->instancet->read_fw_status_reg(mhba);

		if (mhba->fw_state == FW_STATE_STARTED)
			return 0;
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"no handshake response at state 0x%x.\n",
				  mhba->fw_state);
			dev_err(&mhba->pdev->dev,
				"isr : global=0x%x,status=0x%x.\n",
					mhba->global_isr, isr_status);
			return -1;
		}
		rmb();	/* refresh status reads on each poll iteration */
		usleep_range(1000, 2000);
	} while (!(isr_status & DRBL_HANDSHAKE_ISR));

	return 0;
}

/*
 * mvumi_check_handshake - bring the firmware through the complete
 * handshake.  First waits for a READY/DONE signature (resetting the MU
 * if needed), then steps mvumi_handshake_event() until FW_STATE_STARTED.
 *
 * Returns 0 on success, (unsigned char)-1 on timeout or handshake
 * failure.
 */
static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
{
	unsigned int tmp;
	unsigned long before;

	before = jiffies;
	tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
		if (tmp != HANDSHAKE_READYSTATE)
			iowrite32(DRBL_MU_RESET,
					mhba->regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"invalid signature [0x%x].\n", tmp);
			return -1;
		}
		usleep_range(1000, 2000);
		rmb();
		tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
	}

	mhba->fw_state = FW_STATE_STARTING;
	dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
	do {
		if (mvumi_handshake_event(mhba)) {
			dev_err(&mhba->pdev->dev,
					"handshake failed at state 0x%x.\n",
						mhba->fw_state);
			return -1;
		}
	} while (mhba->fw_state != FW_STATE_STARTED);

	dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");

	return 0;
}

/*
 * mvumi_start - bring the adapter online: acknowledge any pending
 * doorbells, unmask the doorbell and communication-list interrupts,
 * then run the firmware handshake.
 *
 * Returns 0 on success, (unsigned char)-1 on handshake failure.
 */
static unsigned char mvumi_start(struct mvumi_hba *mhba)
{
	unsigned int tmp;
	struct mvumi_hw_regs *regs = mhba->regs;

	/* clear Door bell */
	tmp = ioread32(regs->arm_to_pciea_drbl_reg);
	iowrite32(tmp, regs->arm_to_pciea_drbl_reg);

	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
	tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
	iowrite32(tmp, regs->enpointa_mask_reg);
	msleep(100);
	if (mvumi_check_handshake(mhba))
		return -1;

	return 0;
}

/**
 * mvumi_complete_cmd -	Completes a command
 * @mhba: Adapter soft state
 * @cmd: Command to be completed
 * @ob_frame: outbound response frame returned by the firmware
 *
 * Translates the firmware's SAM status into a SCSI midlayer result,
 * copies sense data if present, unmaps the data buffer and returns the
 * command to the driver's pool.
 */
static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
{
	struct scsi_cmnd *scmd = cmd->scmd;

	cmd->scmd->SCp.ptr = NULL;
	scmd->result = ob_frame->req_status;

	switch (ob_frame->req_status) {
	case SAM_STAT_GOOD:
		scmd->result |= DID_OK << 16;
		break;
	case SAM_STAT_BUSY:
		scmd->result |= DID_BUS_BUSY << 16;
		break;
	case SAM_STAT_CHECK_CONDITION:
		scmd->result |= (DID_OK << 16);
		if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
			memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
				sizeof(struct mvumi_sense_data));
			scmd->result |=  (DRIVER_SENSE << 24);
		}
		break;
	default:
		scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
		break;
	}

	if (scsi_bufflen(scmd))
		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
			     scsi_sg_count(scmd),
			     scmd->sc_data_direction);
	cmd->scmd->scsi_done(scmd);
	mvumi_return_cmd(mhba, cmd);
}

/*
 * mvumi_complete_internal_cmd - completion path for driver-internal
 * (blocked) commands: record the status, copy sense data into the
 * caller's buffer if provided, then clear sync_cmd and wake the waiter
 * sleeping in mvumi_issue_blocked_cmd().
 */
static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
		struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
{
	if (atomic_read(&cmd->sync_cmd)) {
		cmd->cmd_status = ob_frame->req_status;

		if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
				(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
						cmd->data_buf) {
			memcpy(cmd->data_buf, ob_frame->payload,
					sizeof(struct mvumi_sense_data));
		}
		atomic_dec(&cmd->sync_cmd);
		wake_up(&mhba->int_cmd_wait_q);
	}
}

/*
 * mvumi_show_event - log a firmware event, including its optional
 * parameter list and sense data, at warning level.
 */
static void mvumi_show_event(struct mvumi_hba *mhba,
			struct mvumi_driver_event *ptr)
{
	unsigned int i;

	dev_warn(&mhba->pdev->dev,
		"Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
		ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
	if (ptr->param_count) {
		printk(KERN_WARNING "Event param(len 0x%x): ",
						ptr->param_count);
		for (i = 0; i < ptr->param_count; i++)
			printk(KERN_WARNING "0x%x ", ptr->params[i]);

		printk(KERN_WARNING "\n");
	}

	if (ptr->sense_data_length) {
		printk(KERN_WARNING "Event sense data(len 0x%x): ",
						ptr->sense_data_length);
1389f0c568a4SJianyun Li for (i = 0; i < ptr->sense_data_length; i++) 1390f0c568a4SJianyun Li printk(KERN_WARNING "0x%x ", ptr->sense_data[i]); 1391f0c568a4SJianyun Li printk(KERN_WARNING "\n"); 1392f0c568a4SJianyun Li } 1393f0c568a4SJianyun Li } 1394f0c568a4SJianyun Li 1395bd756ddeSShun Fu static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status) 1396bd756ddeSShun Fu { 1397bd756ddeSShun Fu struct scsi_device *sdev; 1398bd756ddeSShun Fu int ret = -1; 1399bd756ddeSShun Fu 1400bd756ddeSShun Fu if (status == DEVICE_OFFLINE) { 1401bd756ddeSShun Fu sdev = scsi_device_lookup(mhba->shost, 0, devid, 0); 1402bd756ddeSShun Fu if (sdev) { 1403bd756ddeSShun Fu dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0, 1404bd756ddeSShun Fu sdev->id, 0); 1405bd756ddeSShun Fu scsi_remove_device(sdev); 1406bd756ddeSShun Fu scsi_device_put(sdev); 1407bd756ddeSShun Fu ret = 0; 1408bd756ddeSShun Fu } else 1409bd756ddeSShun Fu dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n", 1410bd756ddeSShun Fu devid); 1411bd756ddeSShun Fu } else if (status == DEVICE_ONLINE) { 1412bd756ddeSShun Fu sdev = scsi_device_lookup(mhba->shost, 0, devid, 0); 1413bd756ddeSShun Fu if (!sdev) { 1414bd756ddeSShun Fu scsi_add_device(mhba->shost, 0, devid, 0); 1415bd756ddeSShun Fu dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0, 1416bd756ddeSShun Fu devid, 0); 1417bd756ddeSShun Fu ret = 0; 1418bd756ddeSShun Fu } else { 1419bd756ddeSShun Fu dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n", 1420bd756ddeSShun Fu 0, devid, 0); 1421bd756ddeSShun Fu scsi_device_put(sdev); 1422bd756ddeSShun Fu } 1423bd756ddeSShun Fu } 1424bd756ddeSShun Fu return ret; 1425bd756ddeSShun Fu } 1426bd756ddeSShun Fu 1427bd756ddeSShun Fu static u64 mvumi_inquiry(struct mvumi_hba *mhba, 1428bd756ddeSShun Fu unsigned int id, struct mvumi_cmd *cmd) 1429bd756ddeSShun Fu { 1430bd756ddeSShun Fu struct mvumi_msg_frame *frame; 1431bd756ddeSShun Fu u64 wwid = 0; 1432bd756ddeSShun Fu int cmd_alloc = 0; 1433bd756ddeSShun 
Fu int data_buf_len = 64; 1434bd756ddeSShun Fu 1435bd756ddeSShun Fu if (!cmd) { 1436bd756ddeSShun Fu cmd = mvumi_create_internal_cmd(mhba, data_buf_len); 1437bd756ddeSShun Fu if (cmd) 1438bd756ddeSShun Fu cmd_alloc = 1; 1439bd756ddeSShun Fu else 1440bd756ddeSShun Fu return 0; 1441bd756ddeSShun Fu } else { 1442bd756ddeSShun Fu memset(cmd->data_buf, 0, data_buf_len); 1443bd756ddeSShun Fu } 1444bd756ddeSShun Fu cmd->scmd = NULL; 1445bd756ddeSShun Fu cmd->cmd_status = REQ_STATUS_PENDING; 1446bd756ddeSShun Fu atomic_set(&cmd->sync_cmd, 0); 1447bd756ddeSShun Fu frame = cmd->frame; 1448bd756ddeSShun Fu frame->device_id = (u16) id; 1449bd756ddeSShun Fu frame->cmd_flag = CMD_FLAG_DATA_IN; 1450bd756ddeSShun Fu frame->req_function = CL_FUN_SCSI_CMD; 1451bd756ddeSShun Fu frame->cdb_length = 6; 1452bd756ddeSShun Fu frame->data_transfer_length = MVUMI_INQUIRY_LENGTH; 1453bd756ddeSShun Fu memset(frame->cdb, 0, frame->cdb_length); 1454bd756ddeSShun Fu frame->cdb[0] = INQUIRY; 1455bd756ddeSShun Fu frame->cdb[4] = frame->data_transfer_length; 1456bd756ddeSShun Fu 1457bd756ddeSShun Fu mvumi_issue_blocked_cmd(mhba, cmd); 1458bd756ddeSShun Fu 1459bd756ddeSShun Fu if (cmd->cmd_status == SAM_STAT_GOOD) { 1460bd756ddeSShun Fu if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) 1461bd756ddeSShun Fu wwid = id + 1; 1462bd756ddeSShun Fu else 1463bd756ddeSShun Fu memcpy((void *)&wwid, 1464bd756ddeSShun Fu (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF), 1465bd756ddeSShun Fu MVUMI_INQUIRY_UUID_LEN); 1466bd756ddeSShun Fu dev_dbg(&mhba->pdev->dev, 1467bd756ddeSShun Fu "inquiry device(0:%d:0) wwid(%llx)\n", id, wwid); 1468bd756ddeSShun Fu } else { 1469bd756ddeSShun Fu wwid = 0; 1470bd756ddeSShun Fu } 1471bd756ddeSShun Fu if (cmd_alloc) 1472bd756ddeSShun Fu mvumi_delete_internal_cmd(mhba, cmd); 1473bd756ddeSShun Fu 1474bd756ddeSShun Fu return wwid; 1475bd756ddeSShun Fu } 1476bd756ddeSShun Fu 1477bd756ddeSShun Fu static void mvumi_detach_devices(struct mvumi_hba *mhba) 1478bd756ddeSShun Fu { 
1479bd756ddeSShun Fu struct mvumi_device *mv_dev = NULL , *dev_next; 1480bd756ddeSShun Fu struct scsi_device *sdev = NULL; 1481bd756ddeSShun Fu 1482bd756ddeSShun Fu mutex_lock(&mhba->device_lock); 1483bd756ddeSShun Fu 1484bd756ddeSShun Fu /* detach Hard Disk */ 1485bd756ddeSShun Fu list_for_each_entry_safe(mv_dev, dev_next, 1486bd756ddeSShun Fu &mhba->shost_dev_list, list) { 1487bd756ddeSShun Fu mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE); 1488bd756ddeSShun Fu list_del_init(&mv_dev->list); 1489bd756ddeSShun Fu dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n", 1490bd756ddeSShun Fu mv_dev->id, mv_dev->wwid); 1491bd756ddeSShun Fu kfree(mv_dev); 1492bd756ddeSShun Fu } 1493bd756ddeSShun Fu list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) { 1494bd756ddeSShun Fu list_del_init(&mv_dev->list); 1495bd756ddeSShun Fu dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n", 1496bd756ddeSShun Fu mv_dev->id, mv_dev->wwid); 1497bd756ddeSShun Fu kfree(mv_dev); 1498bd756ddeSShun Fu } 1499bd756ddeSShun Fu 1500bd756ddeSShun Fu /* detach virtual device */ 1501bd756ddeSShun Fu if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) 1502bd756ddeSShun Fu sdev = scsi_device_lookup(mhba->shost, 0, 1503bd756ddeSShun Fu mhba->max_target_id - 1, 0); 1504bd756ddeSShun Fu 1505bd756ddeSShun Fu if (sdev) { 1506bd756ddeSShun Fu scsi_remove_device(sdev); 1507bd756ddeSShun Fu scsi_device_put(sdev); 1508bd756ddeSShun Fu } 1509bd756ddeSShun Fu 1510bd756ddeSShun Fu mutex_unlock(&mhba->device_lock); 1511bd756ddeSShun Fu } 1512bd756ddeSShun Fu 1513bd756ddeSShun Fu static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id) 1514bd756ddeSShun Fu { 1515bd756ddeSShun Fu struct scsi_device *sdev; 1516bd756ddeSShun Fu 1517bd756ddeSShun Fu sdev = scsi_device_lookup(mhba->shost, 0, id, 0); 1518bd756ddeSShun Fu if (sdev) { 1519bd756ddeSShun Fu scsi_rescan_device(&sdev->sdev_gendev); 1520bd756ddeSShun Fu scsi_device_put(sdev); 1521bd756ddeSShun Fu } 
1522bd756ddeSShun Fu } 1523bd756ddeSShun Fu 1524bd756ddeSShun Fu static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid) 1525bd756ddeSShun Fu { 1526bd756ddeSShun Fu struct mvumi_device *mv_dev = NULL; 1527bd756ddeSShun Fu 1528bd756ddeSShun Fu list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) { 1529bd756ddeSShun Fu if (mv_dev->wwid == wwid) { 1530bd756ddeSShun Fu if (mv_dev->id != id) { 1531bd756ddeSShun Fu dev_err(&mhba->pdev->dev, 1532bd756ddeSShun Fu "%s has same wwid[%llx] ," 1533bd756ddeSShun Fu " but different id[%d %d]\n", 1534bd756ddeSShun Fu __func__, mv_dev->wwid, mv_dev->id, id); 1535bd756ddeSShun Fu return -1; 1536bd756ddeSShun Fu } else { 1537bd756ddeSShun Fu if (mhba->pdev->device == 1538bd756ddeSShun Fu PCI_DEVICE_ID_MARVELL_MV9143) 1539bd756ddeSShun Fu mvumi_rescan_devices(mhba, id); 1540bd756ddeSShun Fu return 1; 1541bd756ddeSShun Fu } 1542bd756ddeSShun Fu } 1543bd756ddeSShun Fu } 1544bd756ddeSShun Fu return 0; 1545bd756ddeSShun Fu } 1546bd756ddeSShun Fu 1547bd756ddeSShun Fu static void mvumi_remove_devices(struct mvumi_hba *mhba, int id) 1548bd756ddeSShun Fu { 1549bd756ddeSShun Fu struct mvumi_device *mv_dev = NULL, *dev_next; 1550bd756ddeSShun Fu 1551bd756ddeSShun Fu list_for_each_entry_safe(mv_dev, dev_next, 1552bd756ddeSShun Fu &mhba->shost_dev_list, list) { 1553bd756ddeSShun Fu if (mv_dev->id == id) { 1554bd756ddeSShun Fu dev_dbg(&mhba->pdev->dev, 1555bd756ddeSShun Fu "detach device(0:%d:0) wwid(%llx) from HOST\n", 1556bd756ddeSShun Fu mv_dev->id, mv_dev->wwid); 1557bd756ddeSShun Fu mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE); 1558bd756ddeSShun Fu list_del_init(&mv_dev->list); 1559bd756ddeSShun Fu kfree(mv_dev); 1560bd756ddeSShun Fu } 1561bd756ddeSShun Fu } 1562bd756ddeSShun Fu } 1563bd756ddeSShun Fu 1564bd756ddeSShun Fu static int mvumi_probe_devices(struct mvumi_hba *mhba) 1565bd756ddeSShun Fu { 1566bd756ddeSShun Fu int id, maxid; 1567bd756ddeSShun Fu u64 wwid = 0; 1568bd756ddeSShun Fu struct mvumi_device 
*mv_dev = NULL; 1569bd756ddeSShun Fu struct mvumi_cmd *cmd = NULL; 1570bd756ddeSShun Fu int found = 0; 1571bd756ddeSShun Fu 1572bd756ddeSShun Fu cmd = mvumi_create_internal_cmd(mhba, 64); 1573bd756ddeSShun Fu if (!cmd) 1574bd756ddeSShun Fu return -1; 1575bd756ddeSShun Fu 1576bd756ddeSShun Fu if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) 1577bd756ddeSShun Fu maxid = mhba->max_target_id; 1578bd756ddeSShun Fu else 1579bd756ddeSShun Fu maxid = mhba->max_target_id - 1; 1580bd756ddeSShun Fu 1581bd756ddeSShun Fu for (id = 0; id < maxid; id++) { 1582bd756ddeSShun Fu wwid = mvumi_inquiry(mhba, id, cmd); 1583bd756ddeSShun Fu if (!wwid) { 1584bd756ddeSShun Fu /* device no response, remove it */ 1585bd756ddeSShun Fu mvumi_remove_devices(mhba, id); 1586bd756ddeSShun Fu } else { 1587bd756ddeSShun Fu /* device response, add it */ 1588bd756ddeSShun Fu found = mvumi_match_devices(mhba, id, wwid); 1589bd756ddeSShun Fu if (!found) { 1590bd756ddeSShun Fu mvumi_remove_devices(mhba, id); 1591bd756ddeSShun Fu mv_dev = kzalloc(sizeof(struct mvumi_device), 1592bd756ddeSShun Fu GFP_KERNEL); 1593bd756ddeSShun Fu if (!mv_dev) { 1594bd756ddeSShun Fu dev_err(&mhba->pdev->dev, 1595bd756ddeSShun Fu "%s alloc mv_dev failed\n", 1596bd756ddeSShun Fu __func__); 1597bd756ddeSShun Fu continue; 1598bd756ddeSShun Fu } 1599bd756ddeSShun Fu mv_dev->id = id; 1600bd756ddeSShun Fu mv_dev->wwid = wwid; 1601bd756ddeSShun Fu mv_dev->sdev = NULL; 1602bd756ddeSShun Fu INIT_LIST_HEAD(&mv_dev->list); 1603bd756ddeSShun Fu list_add_tail(&mv_dev->list, 1604bd756ddeSShun Fu &mhba->mhba_dev_list); 1605bd756ddeSShun Fu dev_dbg(&mhba->pdev->dev, 1606bd756ddeSShun Fu "probe a new device(0:%d:0)" 1607bd756ddeSShun Fu " wwid(%llx)\n", id, mv_dev->wwid); 1608bd756ddeSShun Fu } else if (found == -1) 1609bd756ddeSShun Fu return -1; 1610bd756ddeSShun Fu else 1611bd756ddeSShun Fu continue; 1612bd756ddeSShun Fu } 1613bd756ddeSShun Fu } 1614bd756ddeSShun Fu 1615bd756ddeSShun Fu if (cmd) 1616bd756ddeSShun Fu 
mvumi_delete_internal_cmd(mhba, cmd); 1617bd756ddeSShun Fu 1618bd756ddeSShun Fu return 0; 1619bd756ddeSShun Fu } 1620bd756ddeSShun Fu 1621bd756ddeSShun Fu static int mvumi_rescan_bus(void *data) 1622bd756ddeSShun Fu { 1623bd756ddeSShun Fu int ret = 0; 1624bd756ddeSShun Fu struct mvumi_hba *mhba = (struct mvumi_hba *) data; 1625bd756ddeSShun Fu struct mvumi_device *mv_dev = NULL , *dev_next; 1626bd756ddeSShun Fu 1627bd756ddeSShun Fu while (!kthread_should_stop()) { 1628bd756ddeSShun Fu 1629bd756ddeSShun Fu set_current_state(TASK_INTERRUPTIBLE); 1630bd756ddeSShun Fu if (!atomic_read(&mhba->pnp_count)) 1631bd756ddeSShun Fu schedule(); 1632bd756ddeSShun Fu msleep(1000); 1633bd756ddeSShun Fu atomic_set(&mhba->pnp_count, 0); 1634bd756ddeSShun Fu __set_current_state(TASK_RUNNING); 1635bd756ddeSShun Fu 1636bd756ddeSShun Fu mutex_lock(&mhba->device_lock); 1637bd756ddeSShun Fu ret = mvumi_probe_devices(mhba); 1638bd756ddeSShun Fu if (!ret) { 1639bd756ddeSShun Fu list_for_each_entry_safe(mv_dev, dev_next, 1640bd756ddeSShun Fu &mhba->mhba_dev_list, list) { 1641bd756ddeSShun Fu if (mvumi_handle_hotplug(mhba, mv_dev->id, 1642bd756ddeSShun Fu DEVICE_ONLINE)) { 1643bd756ddeSShun Fu dev_err(&mhba->pdev->dev, 1644bd756ddeSShun Fu "%s add device(0:%d:0) failed" 1645bd756ddeSShun Fu "wwid(%llx) has exist\n", 1646bd756ddeSShun Fu __func__, 1647bd756ddeSShun Fu mv_dev->id, mv_dev->wwid); 1648bd756ddeSShun Fu list_del_init(&mv_dev->list); 1649bd756ddeSShun Fu kfree(mv_dev); 1650bd756ddeSShun Fu } else { 1651bd756ddeSShun Fu list_move_tail(&mv_dev->list, 1652bd756ddeSShun Fu &mhba->shost_dev_list); 1653bd756ddeSShun Fu } 1654bd756ddeSShun Fu } 1655bd756ddeSShun Fu } 1656bd756ddeSShun Fu mutex_unlock(&mhba->device_lock); 1657bd756ddeSShun Fu } 1658bd756ddeSShun Fu return 0; 1659bd756ddeSShun Fu } 1660bd756ddeSShun Fu 1661bd756ddeSShun Fu static void mvumi_proc_msg(struct mvumi_hba *mhba, 1662bd756ddeSShun Fu struct mvumi_hotplug_event *param) 1663bd756ddeSShun Fu { 1664bd756ddeSShun Fu u16 
size = param->size; 1665bd756ddeSShun Fu const unsigned long *ar_bitmap; 1666bd756ddeSShun Fu const unsigned long *re_bitmap; 1667bd756ddeSShun Fu int index; 1668bd756ddeSShun Fu 1669bd756ddeSShun Fu if (mhba->fw_flag & MVUMI_FW_ATTACH) { 1670bd756ddeSShun Fu index = -1; 1671bd756ddeSShun Fu ar_bitmap = (const unsigned long *) param->bitmap; 1672bd756ddeSShun Fu re_bitmap = (const unsigned long *) ¶m->bitmap[size >> 3]; 1673bd756ddeSShun Fu 1674bd756ddeSShun Fu mutex_lock(&mhba->sas_discovery_mutex); 1675bd756ddeSShun Fu do { 1676bd756ddeSShun Fu index = find_next_zero_bit(ar_bitmap, size, index + 1); 1677bd756ddeSShun Fu if (index >= size) 1678bd756ddeSShun Fu break; 1679bd756ddeSShun Fu mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE); 1680bd756ddeSShun Fu } while (1); 1681bd756ddeSShun Fu 1682bd756ddeSShun Fu index = -1; 1683bd756ddeSShun Fu do { 1684bd756ddeSShun Fu index = find_next_zero_bit(re_bitmap, size, index + 1); 1685bd756ddeSShun Fu if (index >= size) 1686bd756ddeSShun Fu break; 1687bd756ddeSShun Fu mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE); 1688bd756ddeSShun Fu } while (1); 1689bd756ddeSShun Fu mutex_unlock(&mhba->sas_discovery_mutex); 1690bd756ddeSShun Fu } 1691bd756ddeSShun Fu } 1692bd756ddeSShun Fu 1693f0c568a4SJianyun Li static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) 1694f0c568a4SJianyun Li { 1695f0c568a4SJianyun Li if (msg == APICDB1_EVENT_GETEVENT) { 1696f0c568a4SJianyun Li int i, count; 1697f0c568a4SJianyun Li struct mvumi_driver_event *param = NULL; 1698f0c568a4SJianyun Li struct mvumi_event_req *er = buffer; 1699f0c568a4SJianyun Li count = er->count; 1700f0c568a4SJianyun Li if (count > MAX_EVENTS_RETURNED) { 1701f0c568a4SJianyun Li dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger" 1702f0c568a4SJianyun Li " than max event count[0x%x].\n", 1703f0c568a4SJianyun Li count, MAX_EVENTS_RETURNED); 1704f0c568a4SJianyun Li return; 1705f0c568a4SJianyun Li } 1706f0c568a4SJianyun Li for (i = 0; i < count; i++) { 
1707f0c568a4SJianyun Li param = &er->events[i]; 1708f0c568a4SJianyun Li mvumi_show_event(mhba, param); 1709f0c568a4SJianyun Li } 1710bd756ddeSShun Fu } else if (msg == APICDB1_HOST_GETEVENT) { 1711bd756ddeSShun Fu mvumi_proc_msg(mhba, buffer); 1712f0c568a4SJianyun Li } 1713f0c568a4SJianyun Li } 1714f0c568a4SJianyun Li 1715f0c568a4SJianyun Li static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg) 1716f0c568a4SJianyun Li { 1717f0c568a4SJianyun Li struct mvumi_cmd *cmd; 1718f0c568a4SJianyun Li struct mvumi_msg_frame *frame; 1719f0c568a4SJianyun Li 1720f0c568a4SJianyun Li cmd = mvumi_create_internal_cmd(mhba, 512); 1721f0c568a4SJianyun Li if (!cmd) 1722f0c568a4SJianyun Li return -1; 1723f0c568a4SJianyun Li cmd->scmd = NULL; 1724f0c568a4SJianyun Li cmd->cmd_status = REQ_STATUS_PENDING; 1725f0c568a4SJianyun Li atomic_set(&cmd->sync_cmd, 0); 1726f0c568a4SJianyun Li frame = cmd->frame; 1727f0c568a4SJianyun Li frame->device_id = 0; 1728f0c568a4SJianyun Li frame->cmd_flag = CMD_FLAG_DATA_IN; 1729f0c568a4SJianyun Li frame->req_function = CL_FUN_SCSI_CMD; 1730f0c568a4SJianyun Li frame->cdb_length = MAX_COMMAND_SIZE; 1731f0c568a4SJianyun Li frame->data_transfer_length = sizeof(struct mvumi_event_req); 1732f0c568a4SJianyun Li memset(frame->cdb, 0, MAX_COMMAND_SIZE); 1733f0c568a4SJianyun Li frame->cdb[0] = APICDB0_EVENT; 1734f0c568a4SJianyun Li frame->cdb[1] = msg; 1735f0c568a4SJianyun Li mvumi_issue_blocked_cmd(mhba, cmd); 1736f0c568a4SJianyun Li 1737f0c568a4SJianyun Li if (cmd->cmd_status != SAM_STAT_GOOD) 1738f0c568a4SJianyun Li dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n", 1739f0c568a4SJianyun Li cmd->cmd_status); 1740f0c568a4SJianyun Li else 1741f0c568a4SJianyun Li mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf); 1742f0c568a4SJianyun Li 1743f0c568a4SJianyun Li mvumi_delete_internal_cmd(mhba, cmd); 1744f0c568a4SJianyun Li return 0; 1745f0c568a4SJianyun Li } 1746f0c568a4SJianyun Li 1747f0c568a4SJianyun Li static void 
mvumi_scan_events(struct work_struct *work) 1748f0c568a4SJianyun Li { 1749f0c568a4SJianyun Li struct mvumi_events_wq *mu_ev = 1750f0c568a4SJianyun Li container_of(work, struct mvumi_events_wq, work_q); 1751f0c568a4SJianyun Li 1752f0c568a4SJianyun Li mvumi_get_event(mu_ev->mhba, mu_ev->event); 1753f0c568a4SJianyun Li kfree(mu_ev); 1754f0c568a4SJianyun Li } 1755f0c568a4SJianyun Li 1756bd756ddeSShun Fu static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status) 1757f0c568a4SJianyun Li { 1758f0c568a4SJianyun Li struct mvumi_events_wq *mu_ev; 1759f0c568a4SJianyun Li 1760bd756ddeSShun Fu while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) { 1761bd756ddeSShun Fu if (isr_status & DRBL_BUS_CHANGE) { 1762bd756ddeSShun Fu atomic_inc(&mhba->pnp_count); 1763bd756ddeSShun Fu wake_up_process(mhba->dm_thread); 1764bd756ddeSShun Fu isr_status &= ~(DRBL_BUS_CHANGE); 1765bd756ddeSShun Fu continue; 1766bd756ddeSShun Fu } 1767bd756ddeSShun Fu 1768f0c568a4SJianyun Li mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC); 1769f0c568a4SJianyun Li if (mu_ev) { 1770f0c568a4SJianyun Li INIT_WORK(&mu_ev->work_q, mvumi_scan_events); 1771f0c568a4SJianyun Li mu_ev->mhba = mhba; 1772bd756ddeSShun Fu mu_ev->event = APICDB1_EVENT_GETEVENT; 1773bd756ddeSShun Fu isr_status &= ~(DRBL_EVENT_NOTIFY); 1774f0c568a4SJianyun Li mu_ev->param = NULL; 1775f0c568a4SJianyun Li schedule_work(&mu_ev->work_q); 1776f0c568a4SJianyun Li } 1777f0c568a4SJianyun Li } 1778bd756ddeSShun Fu } 1779f0c568a4SJianyun Li 1780f0c568a4SJianyun Li static void mvumi_handle_clob(struct mvumi_hba *mhba) 1781f0c568a4SJianyun Li { 1782f0c568a4SJianyun Li struct mvumi_rsp_frame *ob_frame; 1783f0c568a4SJianyun Li struct mvumi_cmd *cmd; 1784f0c568a4SJianyun Li struct mvumi_ob_data *pool; 1785f0c568a4SJianyun Li 1786f0c568a4SJianyun Li while (!list_empty(&mhba->free_ob_list)) { 1787f0c568a4SJianyun Li pool = list_first_entry(&mhba->free_ob_list, 1788f0c568a4SJianyun Li struct mvumi_ob_data, list); 1789f0c568a4SJianyun Li 
list_del_init(&pool->list); 1790f0c568a4SJianyun Li list_add_tail(&pool->list, &mhba->ob_data_list); 1791f0c568a4SJianyun Li 1792f0c568a4SJianyun Li ob_frame = (struct mvumi_rsp_frame *) &pool->data[0]; 1793f0c568a4SJianyun Li cmd = mhba->tag_cmd[ob_frame->tag]; 1794f0c568a4SJianyun Li 1795f0c568a4SJianyun Li atomic_dec(&mhba->fw_outstanding); 1796f0c568a4SJianyun Li mhba->tag_cmd[ob_frame->tag] = 0; 1797f0c568a4SJianyun Li tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag); 1798f0c568a4SJianyun Li if (cmd->scmd) 1799f0c568a4SJianyun Li mvumi_complete_cmd(mhba, cmd, ob_frame); 1800f0c568a4SJianyun Li else 1801f0c568a4SJianyun Li mvumi_complete_internal_cmd(mhba, cmd, ob_frame); 1802f0c568a4SJianyun Li } 1803f0c568a4SJianyun Li mhba->instancet->fire_cmd(mhba, NULL); 1804f0c568a4SJianyun Li } 1805f0c568a4SJianyun Li 1806f0c568a4SJianyun Li static irqreturn_t mvumi_isr_handler(int irq, void *devp) 1807f0c568a4SJianyun Li { 1808f0c568a4SJianyun Li struct mvumi_hba *mhba = (struct mvumi_hba *) devp; 1809f0c568a4SJianyun Li unsigned long flags; 1810f0c568a4SJianyun Li 1811f0c568a4SJianyun Li spin_lock_irqsave(mhba->shost->host_lock, flags); 1812f0c568a4SJianyun Li if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) { 1813f0c568a4SJianyun Li spin_unlock_irqrestore(mhba->shost->host_lock, flags); 1814f0c568a4SJianyun Li return IRQ_NONE; 1815f0c568a4SJianyun Li } 1816f0c568a4SJianyun Li 1817bd756ddeSShun Fu if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) { 1818bd756ddeSShun Fu if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) 1819bd756ddeSShun Fu mvumi_launch_events(mhba, mhba->isr_status); 1820f0c568a4SJianyun Li if (mhba->isr_status & DRBL_HANDSHAKE_ISR) { 1821f0c568a4SJianyun Li dev_warn(&mhba->pdev->dev, "enter handshake again!\n"); 1822f0c568a4SJianyun Li mvumi_handshake(mhba); 1823f0c568a4SJianyun Li } 1824bd756ddeSShun Fu 1825f0c568a4SJianyun Li } 1826f0c568a4SJianyun Li 1827bd756ddeSShun Fu if (mhba->global_isr & 
mhba->regs->int_comaout) 1828f0c568a4SJianyun Li mvumi_receive_ob_list_entry(mhba); 1829f0c568a4SJianyun Li 1830f0c568a4SJianyun Li mhba->global_isr = 0; 1831f0c568a4SJianyun Li mhba->isr_status = 0; 1832f0c568a4SJianyun Li if (mhba->fw_state == FW_STATE_STARTED) 1833f0c568a4SJianyun Li mvumi_handle_clob(mhba); 1834f0c568a4SJianyun Li spin_unlock_irqrestore(mhba->shost->host_lock, flags); 1835f0c568a4SJianyun Li return IRQ_HANDLED; 1836f0c568a4SJianyun Li } 1837f0c568a4SJianyun Li 1838f0c568a4SJianyun Li static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, 1839f0c568a4SJianyun Li struct mvumi_cmd *cmd) 1840f0c568a4SJianyun Li { 1841f0c568a4SJianyun Li void *ib_entry; 1842f0c568a4SJianyun Li struct mvumi_msg_frame *ib_frame; 1843f0c568a4SJianyun Li unsigned int frame_len; 1844f0c568a4SJianyun Li 1845f0c568a4SJianyun Li ib_frame = cmd->frame; 1846f0c568a4SJianyun Li if (unlikely(mhba->fw_state != FW_STATE_STARTED)) { 1847f0c568a4SJianyun Li dev_dbg(&mhba->pdev->dev, "firmware not ready.\n"); 1848f0c568a4SJianyun Li return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; 1849f0c568a4SJianyun Li } 1850f0c568a4SJianyun Li if (tag_is_empty(&mhba->tag_pool)) { 1851f0c568a4SJianyun Li dev_dbg(&mhba->pdev->dev, "no free tag.\n"); 1852f0c568a4SJianyun Li return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; 1853f0c568a4SJianyun Li } 1854bd756ddeSShun Fu mvumi_get_ib_list_entry(mhba, &ib_entry); 1855f0c568a4SJianyun Li 1856f0c568a4SJianyun Li cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool); 1857f0c568a4SJianyun Li cmd->frame->request_id = mhba->io_seq++; 1858f0c568a4SJianyun Li cmd->request_id = cmd->frame->request_id; 1859f0c568a4SJianyun Li mhba->tag_cmd[cmd->frame->tag] = cmd; 1860f0c568a4SJianyun Li frame_len = sizeof(*ib_frame) - 4 + 1861f0c568a4SJianyun Li ib_frame->sg_counts * sizeof(struct mvumi_sgl); 1862bd756ddeSShun Fu if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { 1863bd756ddeSShun Fu struct mvumi_dyn_list_entry *dle; 1864bd756ddeSShun Fu dle = 
ib_entry; 1865bd756ddeSShun Fu dle->src_low_addr = 1866bd756ddeSShun Fu cpu_to_le32(lower_32_bits(cmd->frame_phys)); 1867bd756ddeSShun Fu dle->src_high_addr = 1868bd756ddeSShun Fu cpu_to_le32(upper_32_bits(cmd->frame_phys)); 1869bd756ddeSShun Fu dle->if_length = (frame_len >> 2) & 0xFFF; 1870bd756ddeSShun Fu } else { 1871f0c568a4SJianyun Li memcpy(ib_entry, ib_frame, frame_len); 1872bd756ddeSShun Fu } 1873f0c568a4SJianyun Li return MV_QUEUE_COMMAND_RESULT_SENT; 1874f0c568a4SJianyun Li } 1875f0c568a4SJianyun Li 1876f0c568a4SJianyun Li static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) 1877f0c568a4SJianyun Li { 1878f0c568a4SJianyun Li unsigned short num_of_cl_sent = 0; 1879bd756ddeSShun Fu unsigned int count; 1880f0c568a4SJianyun Li enum mvumi_qc_result result; 1881f0c568a4SJianyun Li 1882f0c568a4SJianyun Li if (cmd) 1883f0c568a4SJianyun Li list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list); 1884bd756ddeSShun Fu count = mhba->instancet->check_ib_list(mhba); 1885bd756ddeSShun Fu if (list_empty(&mhba->waiting_req_list) || !count) 1886bd756ddeSShun Fu return; 1887f0c568a4SJianyun Li 1888bd756ddeSShun Fu do { 1889f0c568a4SJianyun Li cmd = list_first_entry(&mhba->waiting_req_list, 1890f0c568a4SJianyun Li struct mvumi_cmd, queue_pointer); 1891f0c568a4SJianyun Li list_del_init(&cmd->queue_pointer); 1892f0c568a4SJianyun Li result = mvumi_send_command(mhba, cmd); 1893f0c568a4SJianyun Li switch (result) { 1894f0c568a4SJianyun Li case MV_QUEUE_COMMAND_RESULT_SENT: 1895f0c568a4SJianyun Li num_of_cl_sent++; 1896f0c568a4SJianyun Li break; 1897f0c568a4SJianyun Li case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE: 1898f0c568a4SJianyun Li list_add(&cmd->queue_pointer, &mhba->waiting_req_list); 1899f0c568a4SJianyun Li if (num_of_cl_sent > 0) 1900f0c568a4SJianyun Li mvumi_send_ib_list_entry(mhba); 1901f0c568a4SJianyun Li 1902f0c568a4SJianyun Li return; 1903f0c568a4SJianyun Li } 1904bd756ddeSShun Fu } while (!list_empty(&mhba->waiting_req_list) && count--); 
1905bd756ddeSShun Fu 1906f0c568a4SJianyun Li if (num_of_cl_sent > 0) 1907f0c568a4SJianyun Li mvumi_send_ib_list_entry(mhba); 1908f0c568a4SJianyun Li } 1909f0c568a4SJianyun Li 1910f0c568a4SJianyun Li /** 1911f0c568a4SJianyun Li * mvumi_enable_intr - Enables interrupts 1912bd756ddeSShun Fu * @mhba: Adapter soft state 1913f0c568a4SJianyun Li */ 1914bd756ddeSShun Fu static void mvumi_enable_intr(struct mvumi_hba *mhba) 1915f0c568a4SJianyun Li { 1916f0c568a4SJianyun Li unsigned int mask; 1917bd756ddeSShun Fu struct mvumi_hw_regs *regs = mhba->regs; 1918f0c568a4SJianyun Li 1919bd756ddeSShun Fu iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg); 1920bd756ddeSShun Fu mask = ioread32(regs->enpointa_mask_reg); 1921bd756ddeSShun Fu mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr; 1922bd756ddeSShun Fu iowrite32(mask, regs->enpointa_mask_reg); 1923f0c568a4SJianyun Li } 1924f0c568a4SJianyun Li 1925f0c568a4SJianyun Li /** 1926f0c568a4SJianyun Li * mvumi_disable_intr -Disables interrupt 1927bd756ddeSShun Fu * @mhba: Adapter soft state 1928f0c568a4SJianyun Li */ 1929bd756ddeSShun Fu static void mvumi_disable_intr(struct mvumi_hba *mhba) 1930f0c568a4SJianyun Li { 1931f0c568a4SJianyun Li unsigned int mask; 1932bd756ddeSShun Fu struct mvumi_hw_regs *regs = mhba->regs; 1933f0c568a4SJianyun Li 1934bd756ddeSShun Fu iowrite32(0, regs->arm_to_pciea_mask_reg); 1935bd756ddeSShun Fu mask = ioread32(regs->enpointa_mask_reg); 1936bd756ddeSShun Fu mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout | 1937bd756ddeSShun Fu regs->int_comaerr); 1938bd756ddeSShun Fu iowrite32(mask, regs->enpointa_mask_reg); 1939f0c568a4SJianyun Li } 1940f0c568a4SJianyun Li 1941f0c568a4SJianyun Li static int mvumi_clear_intr(void *extend) 1942f0c568a4SJianyun Li { 1943f0c568a4SJianyun Li struct mvumi_hba *mhba = (struct mvumi_hba *) extend; 1944f0c568a4SJianyun Li unsigned int status, isr_status = 0, tmp = 0; 1945bd756ddeSShun Fu struct mvumi_hw_regs *regs = mhba->regs; 
1946f0c568a4SJianyun Li 1947bd756ddeSShun Fu status = ioread32(regs->main_int_cause_reg); 1948bd756ddeSShun Fu if (!(status & regs->int_mu) || status == 0xFFFFFFFF) 1949f0c568a4SJianyun Li return 1; 1950bd756ddeSShun Fu if (unlikely(status & regs->int_comaerr)) { 1951bd756ddeSShun Fu tmp = ioread32(regs->outb_isr_cause); 1952bd756ddeSShun Fu if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) { 1953bd756ddeSShun Fu if (tmp & regs->clic_out_err) { 1954bd756ddeSShun Fu iowrite32(tmp & regs->clic_out_err, 1955bd756ddeSShun Fu regs->outb_isr_cause); 1956bd756ddeSShun Fu } 1957bd756ddeSShun Fu } else { 1958bd756ddeSShun Fu if (tmp & (regs->clic_in_err | regs->clic_out_err)) 1959bd756ddeSShun Fu iowrite32(tmp & (regs->clic_in_err | 1960bd756ddeSShun Fu regs->clic_out_err), 1961bd756ddeSShun Fu regs->outb_isr_cause); 1962bd756ddeSShun Fu } 1963bd756ddeSShun Fu status ^= mhba->regs->int_comaerr; 1964f0c568a4SJianyun Li /* inbound or outbound parity error, command will timeout */ 1965f0c568a4SJianyun Li } 1966bd756ddeSShun Fu if (status & regs->int_comaout) { 1967bd756ddeSShun Fu tmp = ioread32(regs->outb_isr_cause); 1968bd756ddeSShun Fu if (tmp & regs->clic_irq) 1969bd756ddeSShun Fu iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause); 1970f0c568a4SJianyun Li } 1971bd756ddeSShun Fu if (status & regs->int_dl_cpu2pciea) { 1972bd756ddeSShun Fu isr_status = ioread32(regs->arm_to_pciea_drbl_reg); 1973f0c568a4SJianyun Li if (isr_status) 1974bd756ddeSShun Fu iowrite32(isr_status, regs->arm_to_pciea_drbl_reg); 1975f0c568a4SJianyun Li } 1976f0c568a4SJianyun Li 1977f0c568a4SJianyun Li mhba->global_isr = status; 1978f0c568a4SJianyun Li mhba->isr_status = isr_status; 1979f0c568a4SJianyun Li 1980f0c568a4SJianyun Li return 0; 1981f0c568a4SJianyun Li } 1982f0c568a4SJianyun Li 1983f0c568a4SJianyun Li /** 1984f0c568a4SJianyun Li * mvumi_read_fw_status_reg - returns the current FW status value 1985bd756ddeSShun Fu * @mhba: Adapter soft state 1986f0c568a4SJianyun Li */ 1987bd756ddeSShun 
Fu static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba) 1988f0c568a4SJianyun Li { 1989f0c568a4SJianyun Li unsigned int status; 1990f0c568a4SJianyun Li 1991bd756ddeSShun Fu status = ioread32(mhba->regs->arm_to_pciea_drbl_reg); 1992f0c568a4SJianyun Li if (status) 1993bd756ddeSShun Fu iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg); 1994f0c568a4SJianyun Li return status; 1995f0c568a4SJianyun Li } 1996f0c568a4SJianyun Li 1997bd756ddeSShun Fu static struct mvumi_instance_template mvumi_instance_9143 = { 1998f0c568a4SJianyun Li .fire_cmd = mvumi_fire_cmd, 1999f0c568a4SJianyun Li .enable_intr = mvumi_enable_intr, 2000f0c568a4SJianyun Li .disable_intr = mvumi_disable_intr, 2001f0c568a4SJianyun Li .clear_intr = mvumi_clear_intr, 2002f0c568a4SJianyun Li .read_fw_status_reg = mvumi_read_fw_status_reg, 2003bd756ddeSShun Fu .check_ib_list = mvumi_check_ib_list_9143, 2004bd756ddeSShun Fu .check_ob_list = mvumi_check_ob_list_9143, 2005bd756ddeSShun Fu .reset_host = mvumi_reset_host_9143, 2006bd756ddeSShun Fu }; 2007bd756ddeSShun Fu 2008bd756ddeSShun Fu static struct mvumi_instance_template mvumi_instance_9580 = { 2009bd756ddeSShun Fu .fire_cmd = mvumi_fire_cmd, 2010bd756ddeSShun Fu .enable_intr = mvumi_enable_intr, 2011bd756ddeSShun Fu .disable_intr = mvumi_disable_intr, 2012bd756ddeSShun Fu .clear_intr = mvumi_clear_intr, 2013bd756ddeSShun Fu .read_fw_status_reg = mvumi_read_fw_status_reg, 2014bd756ddeSShun Fu .check_ib_list = mvumi_check_ib_list_9580, 2015bd756ddeSShun Fu .check_ob_list = mvumi_check_ob_list_9580, 2016bd756ddeSShun Fu .reset_host = mvumi_reset_host_9580, 2017f0c568a4SJianyun Li }; 2018f0c568a4SJianyun Li 2019f0c568a4SJianyun Li static int mvumi_slave_configure(struct scsi_device *sdev) 2020f0c568a4SJianyun Li { 2021f0c568a4SJianyun Li struct mvumi_hba *mhba; 2022f0c568a4SJianyun Li unsigned char bitcount = sizeof(unsigned char) * 8; 2023f0c568a4SJianyun Li 2024f0c568a4SJianyun Li mhba = (struct mvumi_hba *) sdev->host->hostdata; 
2025f0c568a4SJianyun Li if (sdev->id >= mhba->max_target_id) 2026f0c568a4SJianyun Li return -EINVAL; 2027f0c568a4SJianyun Li 2028f0c568a4SJianyun Li mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount)); 2029f0c568a4SJianyun Li return 0; 2030f0c568a4SJianyun Li } 2031f0c568a4SJianyun Li 2032f0c568a4SJianyun Li /** 2033f0c568a4SJianyun Li * mvumi_build_frame - Prepares a direct cdb (DCDB) command 2034f0c568a4SJianyun Li * @mhba: Adapter soft state 2035f0c568a4SJianyun Li * @scmd: SCSI command 2036f0c568a4SJianyun Li * @cmd: Command to be prepared in 2037f0c568a4SJianyun Li * 2038f0c568a4SJianyun Li * This function prepares CDB commands. These are typcially pass-through 2039f0c568a4SJianyun Li * commands to the devices. 2040f0c568a4SJianyun Li */ 2041f0c568a4SJianyun Li static unsigned char mvumi_build_frame(struct mvumi_hba *mhba, 2042f0c568a4SJianyun Li struct scsi_cmnd *scmd, struct mvumi_cmd *cmd) 2043f0c568a4SJianyun Li { 2044f0c568a4SJianyun Li struct mvumi_msg_frame *pframe; 2045f0c568a4SJianyun Li 2046f0c568a4SJianyun Li cmd->scmd = scmd; 2047f0c568a4SJianyun Li cmd->cmd_status = REQ_STATUS_PENDING; 2048f0c568a4SJianyun Li pframe = cmd->frame; 2049f0c568a4SJianyun Li pframe->device_id = ((unsigned short) scmd->device->id) | 2050f0c568a4SJianyun Li (((unsigned short) scmd->device->lun) << 8); 2051f0c568a4SJianyun Li pframe->cmd_flag = 0; 2052f0c568a4SJianyun Li 2053f0c568a4SJianyun Li switch (scmd->sc_data_direction) { 2054f0c568a4SJianyun Li case DMA_NONE: 2055f0c568a4SJianyun Li pframe->cmd_flag |= CMD_FLAG_NON_DATA; 2056f0c568a4SJianyun Li break; 2057f0c568a4SJianyun Li case DMA_FROM_DEVICE: 2058f0c568a4SJianyun Li pframe->cmd_flag |= CMD_FLAG_DATA_IN; 2059f0c568a4SJianyun Li break; 2060f0c568a4SJianyun Li case DMA_TO_DEVICE: 2061f0c568a4SJianyun Li pframe->cmd_flag |= CMD_FLAG_DATA_OUT; 2062f0c568a4SJianyun Li break; 2063f0c568a4SJianyun Li case DMA_BIDIRECTIONAL: 2064f0c568a4SJianyun Li default: 2065f0c568a4SJianyun Li 
dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] " 2066f0c568a4SJianyun Li "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]); 2067f0c568a4SJianyun Li goto error; 2068f0c568a4SJianyun Li } 2069f0c568a4SJianyun Li 2070f0c568a4SJianyun Li pframe->cdb_length = scmd->cmd_len; 2071f0c568a4SJianyun Li memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length); 2072f0c568a4SJianyun Li pframe->req_function = CL_FUN_SCSI_CMD; 2073f0c568a4SJianyun Li if (scsi_bufflen(scmd)) { 2074f0c568a4SJianyun Li if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0], 2075f0c568a4SJianyun Li &pframe->sg_counts)) 2076f0c568a4SJianyun Li goto error; 2077f0c568a4SJianyun Li 2078f0c568a4SJianyun Li pframe->data_transfer_length = scsi_bufflen(scmd); 2079f0c568a4SJianyun Li } else { 2080f0c568a4SJianyun Li pframe->sg_counts = 0; 2081f0c568a4SJianyun Li pframe->data_transfer_length = 0; 2082f0c568a4SJianyun Li } 2083f0c568a4SJianyun Li return 0; 2084f0c568a4SJianyun Li 2085f0c568a4SJianyun Li error: 2086f0c568a4SJianyun Li scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) | 2087f0c568a4SJianyun Li SAM_STAT_CHECK_CONDITION; 2088f0c568a4SJianyun Li scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24, 2089f0c568a4SJianyun Li 0); 2090f0c568a4SJianyun Li return -1; 2091f0c568a4SJianyun Li } 2092f0c568a4SJianyun Li 2093f0c568a4SJianyun Li /** 2094f0c568a4SJianyun Li * mvumi_queue_command - Queue entry point 2095f0c568a4SJianyun Li * @scmd: SCSI command to be queued 2096f0c568a4SJianyun Li * @done: Callback entry point 2097f0c568a4SJianyun Li */ 2098f0c568a4SJianyun Li static int mvumi_queue_command(struct Scsi_Host *shost, 2099f0c568a4SJianyun Li struct scsi_cmnd *scmd) 2100f0c568a4SJianyun Li { 2101f0c568a4SJianyun Li struct mvumi_cmd *cmd; 2102f0c568a4SJianyun Li struct mvumi_hba *mhba; 2103f0c568a4SJianyun Li unsigned long irq_flags; 2104f0c568a4SJianyun Li 2105f0c568a4SJianyun Li spin_lock_irqsave(shost->host_lock, irq_flags); 2106f0c568a4SJianyun Li 
scsi_cmd_get_serial(shost, scmd); 2107f0c568a4SJianyun Li 2108f0c568a4SJianyun Li mhba = (struct mvumi_hba *) shost->hostdata; 2109f0c568a4SJianyun Li scmd->result = 0; 2110f0c568a4SJianyun Li cmd = mvumi_get_cmd(mhba); 2111f0c568a4SJianyun Li if (unlikely(!cmd)) { 2112f0c568a4SJianyun Li spin_unlock_irqrestore(shost->host_lock, irq_flags); 2113f0c568a4SJianyun Li return SCSI_MLQUEUE_HOST_BUSY; 2114f0c568a4SJianyun Li } 2115f0c568a4SJianyun Li 2116f0c568a4SJianyun Li if (unlikely(mvumi_build_frame(mhba, scmd, cmd))) 2117f0c568a4SJianyun Li goto out_return_cmd; 2118f0c568a4SJianyun Li 2119f0c568a4SJianyun Li cmd->scmd = scmd; 2120f0c568a4SJianyun Li scmd->SCp.ptr = (char *) cmd; 2121f0c568a4SJianyun Li mhba->instancet->fire_cmd(mhba, cmd); 2122f0c568a4SJianyun Li spin_unlock_irqrestore(shost->host_lock, irq_flags); 2123f0c568a4SJianyun Li return 0; 2124f0c568a4SJianyun Li 2125f0c568a4SJianyun Li out_return_cmd: 2126f0c568a4SJianyun Li mvumi_return_cmd(mhba, cmd); 2127f0c568a4SJianyun Li scmd->scsi_done(scmd); 2128f0c568a4SJianyun Li spin_unlock_irqrestore(shost->host_lock, irq_flags); 2129f0c568a4SJianyun Li return 0; 2130f0c568a4SJianyun Li } 2131f0c568a4SJianyun Li 2132f0c568a4SJianyun Li static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd) 2133f0c568a4SJianyun Li { 2134f0c568a4SJianyun Li struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr; 2135f0c568a4SJianyun Li struct Scsi_Host *host = scmd->device->host; 2136f0c568a4SJianyun Li struct mvumi_hba *mhba = shost_priv(host); 2137f0c568a4SJianyun Li unsigned long flags; 2138f0c568a4SJianyun Li 2139f0c568a4SJianyun Li spin_lock_irqsave(mhba->shost->host_lock, flags); 2140f0c568a4SJianyun Li 2141f0c568a4SJianyun Li if (mhba->tag_cmd[cmd->frame->tag]) { 2142f0c568a4SJianyun Li mhba->tag_cmd[cmd->frame->tag] = 0; 2143f0c568a4SJianyun Li tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag); 2144f0c568a4SJianyun Li } 2145f0c568a4SJianyun Li if (!list_empty(&cmd->queue_pointer)) 
2146f0c568a4SJianyun Li list_del_init(&cmd->queue_pointer); 2147f0c568a4SJianyun Li else 2148f0c568a4SJianyun Li atomic_dec(&mhba->fw_outstanding); 2149f0c568a4SJianyun Li 2150f0c568a4SJianyun Li scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16); 2151f0c568a4SJianyun Li scmd->SCp.ptr = NULL; 2152f0c568a4SJianyun Li if (scsi_bufflen(scmd)) { 2153*ab8e7f4bSChristoph Hellwig dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), 2154f0c568a4SJianyun Li scsi_sg_count(scmd), 2155*ab8e7f4bSChristoph Hellwig scmd->sc_data_direction); 2156f0c568a4SJianyun Li } 2157f0c568a4SJianyun Li mvumi_return_cmd(mhba, cmd); 2158f0c568a4SJianyun Li spin_unlock_irqrestore(mhba->shost->host_lock, flags); 2159f0c568a4SJianyun Li 21606600593cSChristoph Hellwig return BLK_EH_DONE; 2161f0c568a4SJianyun Li } 2162f0c568a4SJianyun Li 2163f0c568a4SJianyun Li static int 2164f0c568a4SJianyun Li mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev, 2165f0c568a4SJianyun Li sector_t capacity, int geom[]) 2166f0c568a4SJianyun Li { 2167f0c568a4SJianyun Li int heads, sectors; 2168f0c568a4SJianyun Li sector_t cylinders; 2169f0c568a4SJianyun Li unsigned long tmp; 2170f0c568a4SJianyun Li 2171f0c568a4SJianyun Li heads = 64; 2172f0c568a4SJianyun Li sectors = 32; 2173f0c568a4SJianyun Li tmp = heads * sectors; 2174f0c568a4SJianyun Li cylinders = capacity; 2175f0c568a4SJianyun Li sector_div(cylinders, tmp); 2176f0c568a4SJianyun Li 2177f0c568a4SJianyun Li if (capacity >= 0x200000) { 2178f0c568a4SJianyun Li heads = 255; 2179f0c568a4SJianyun Li sectors = 63; 2180f0c568a4SJianyun Li tmp = heads * sectors; 2181f0c568a4SJianyun Li cylinders = capacity; 2182f0c568a4SJianyun Li sector_div(cylinders, tmp); 2183f0c568a4SJianyun Li } 2184f0c568a4SJianyun Li geom[0] = heads; 2185f0c568a4SJianyun Li geom[1] = sectors; 2186f0c568a4SJianyun Li geom[2] = cylinders; 2187f0c568a4SJianyun Li 2188f0c568a4SJianyun Li return 0; 2189f0c568a4SJianyun Li } 2190f0c568a4SJianyun Li 2191f0c568a4SJianyun Li static struct 
scsi_host_template mvumi_template = { 2192f0c568a4SJianyun Li 2193f0c568a4SJianyun Li .module = THIS_MODULE, 2194f0c568a4SJianyun Li .name = "Marvell Storage Controller", 2195f0c568a4SJianyun Li .slave_configure = mvumi_slave_configure, 2196f0c568a4SJianyun Li .queuecommand = mvumi_queue_command, 2197103eb3b5SChristoph Hellwig .eh_timed_out = mvumi_timed_out, 2198f0c568a4SJianyun Li .eh_host_reset_handler = mvumi_host_reset, 2199f0c568a4SJianyun Li .bios_param = mvumi_bios_param, 2200f0c568a4SJianyun Li .this_id = -1, 2201f0c568a4SJianyun Li }; 2202f0c568a4SJianyun Li 2203bd756ddeSShun Fu static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba) 2204bd756ddeSShun Fu { 2205bd756ddeSShun Fu void *base = NULL; 2206bd756ddeSShun Fu struct mvumi_hw_regs *regs; 2207bd756ddeSShun Fu 2208bd756ddeSShun Fu switch (mhba->pdev->device) { 2209bd756ddeSShun Fu case PCI_DEVICE_ID_MARVELL_MV9143: 2210bd756ddeSShun Fu mhba->mmio = mhba->base_addr[0]; 2211bd756ddeSShun Fu base = mhba->mmio; 2212bd756ddeSShun Fu if (!mhba->regs) { 2213bd756ddeSShun Fu mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); 2214bd756ddeSShun Fu if (mhba->regs == NULL) 2215bd756ddeSShun Fu return -ENOMEM; 2216bd756ddeSShun Fu } 2217bd756ddeSShun Fu regs = mhba->regs; 2218bd756ddeSShun Fu 2219bd756ddeSShun Fu /* For Arm */ 2220bd756ddeSShun Fu regs->ctrl_sts_reg = base + 0x20104; 2221bd756ddeSShun Fu regs->rstoutn_mask_reg = base + 0x20108; 2222bd756ddeSShun Fu regs->sys_soft_rst_reg = base + 0x2010C; 2223bd756ddeSShun Fu regs->main_int_cause_reg = base + 0x20200; 2224bd756ddeSShun Fu regs->enpointa_mask_reg = base + 0x2020C; 2225bd756ddeSShun Fu regs->rstoutn_en_reg = base + 0xF1400; 2226bd756ddeSShun Fu /* For Doorbell */ 2227bd756ddeSShun Fu regs->pciea_to_arm_drbl_reg = base + 0x20400; 2228bd756ddeSShun Fu regs->arm_to_pciea_drbl_reg = base + 0x20408; 2229bd756ddeSShun Fu regs->arm_to_pciea_mask_reg = base + 0x2040C; 2230bd756ddeSShun Fu regs->pciea_to_arm_msg0 = base + 0x20430; 2231bd756ddeSShun Fu 
regs->pciea_to_arm_msg1 = base + 0x20434; 2232bd756ddeSShun Fu regs->arm_to_pciea_msg0 = base + 0x20438; 2233bd756ddeSShun Fu regs->arm_to_pciea_msg1 = base + 0x2043C; 2234bd756ddeSShun Fu 2235bd756ddeSShun Fu /* For Message Unit */ 2236bd756ddeSShun Fu 2237bd756ddeSShun Fu regs->inb_aval_count_basel = base + 0x508; 2238bd756ddeSShun Fu regs->inb_aval_count_baseh = base + 0x50C; 2239bd756ddeSShun Fu regs->inb_write_pointer = base + 0x518; 2240bd756ddeSShun Fu regs->inb_read_pointer = base + 0x51C; 2241bd756ddeSShun Fu regs->outb_coal_cfg = base + 0x568; 2242bd756ddeSShun Fu regs->outb_copy_basel = base + 0x5B0; 2243bd756ddeSShun Fu regs->outb_copy_baseh = base + 0x5B4; 2244bd756ddeSShun Fu regs->outb_copy_pointer = base + 0x544; 2245bd756ddeSShun Fu regs->outb_read_pointer = base + 0x548; 2246bd756ddeSShun Fu regs->outb_isr_cause = base + 0x560; 2247bd756ddeSShun Fu regs->outb_coal_cfg = base + 0x568; 2248bd756ddeSShun Fu /* Bit setting for HW */ 2249bd756ddeSShun Fu regs->int_comaout = 1 << 8; 2250bd756ddeSShun Fu regs->int_comaerr = 1 << 6; 2251bd756ddeSShun Fu regs->int_dl_cpu2pciea = 1 << 1; 2252bd756ddeSShun Fu regs->cl_pointer_toggle = 1 << 12; 2253bd756ddeSShun Fu regs->clic_irq = 1 << 1; 2254bd756ddeSShun Fu regs->clic_in_err = 1 << 8; 2255bd756ddeSShun Fu regs->clic_out_err = 1 << 12; 2256bd756ddeSShun Fu regs->cl_slot_num_mask = 0xFFF; 2257bd756ddeSShun Fu regs->int_drbl_int_mask = 0x3FFFFFFF; 2258bd756ddeSShun Fu regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout | 2259bd756ddeSShun Fu regs->int_comaerr; 2260bd756ddeSShun Fu break; 2261bd756ddeSShun Fu case PCI_DEVICE_ID_MARVELL_MV9580: 2262bd756ddeSShun Fu mhba->mmio = mhba->base_addr[2]; 2263bd756ddeSShun Fu base = mhba->mmio; 2264bd756ddeSShun Fu if (!mhba->regs) { 2265bd756ddeSShun Fu mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); 2266bd756ddeSShun Fu if (mhba->regs == NULL) 2267bd756ddeSShun Fu return -ENOMEM; 2268bd756ddeSShun Fu } 2269bd756ddeSShun Fu regs = mhba->regs; 2270bd756ddeSShun 
Fu /* For Arm */ 2271bd756ddeSShun Fu regs->ctrl_sts_reg = base + 0x20104; 2272bd756ddeSShun Fu regs->rstoutn_mask_reg = base + 0x1010C; 2273bd756ddeSShun Fu regs->sys_soft_rst_reg = base + 0x10108; 2274bd756ddeSShun Fu regs->main_int_cause_reg = base + 0x10200; 2275bd756ddeSShun Fu regs->enpointa_mask_reg = base + 0x1020C; 2276bd756ddeSShun Fu regs->rstoutn_en_reg = base + 0xF1400; 2277bd756ddeSShun Fu 2278bd756ddeSShun Fu /* For Doorbell */ 2279bd756ddeSShun Fu regs->pciea_to_arm_drbl_reg = base + 0x10460; 2280bd756ddeSShun Fu regs->arm_to_pciea_drbl_reg = base + 0x10480; 2281bd756ddeSShun Fu regs->arm_to_pciea_mask_reg = base + 0x10484; 2282bd756ddeSShun Fu regs->pciea_to_arm_msg0 = base + 0x10400; 2283bd756ddeSShun Fu regs->pciea_to_arm_msg1 = base + 0x10404; 2284bd756ddeSShun Fu regs->arm_to_pciea_msg0 = base + 0x10420; 2285bd756ddeSShun Fu regs->arm_to_pciea_msg1 = base + 0x10424; 2286bd756ddeSShun Fu 2287bd756ddeSShun Fu /* For reset*/ 2288bd756ddeSShun Fu regs->reset_request = base + 0x10108; 2289bd756ddeSShun Fu regs->reset_enable = base + 0x1010c; 2290bd756ddeSShun Fu 2291bd756ddeSShun Fu /* For Message Unit */ 2292bd756ddeSShun Fu regs->inb_aval_count_basel = base + 0x4008; 2293bd756ddeSShun Fu regs->inb_aval_count_baseh = base + 0x400C; 2294bd756ddeSShun Fu regs->inb_write_pointer = base + 0x4018; 2295bd756ddeSShun Fu regs->inb_read_pointer = base + 0x401C; 2296bd756ddeSShun Fu regs->outb_copy_basel = base + 0x4058; 2297bd756ddeSShun Fu regs->outb_copy_baseh = base + 0x405C; 2298bd756ddeSShun Fu regs->outb_copy_pointer = base + 0x406C; 2299bd756ddeSShun Fu regs->outb_read_pointer = base + 0x4070; 2300bd756ddeSShun Fu regs->outb_coal_cfg = base + 0x4080; 2301bd756ddeSShun Fu regs->outb_isr_cause = base + 0x4088; 2302bd756ddeSShun Fu /* Bit setting for HW */ 2303bd756ddeSShun Fu regs->int_comaout = 1 << 4; 2304bd756ddeSShun Fu regs->int_dl_cpu2pciea = 1 << 12; 2305bd756ddeSShun Fu regs->int_comaerr = 1 << 29; 2306bd756ddeSShun Fu regs->cl_pointer_toggle = 
1 << 14; 2307bd756ddeSShun Fu regs->cl_slot_num_mask = 0x3FFF; 2308bd756ddeSShun Fu regs->clic_irq = 1 << 0; 2309bd756ddeSShun Fu regs->clic_out_err = 1 << 1; 2310bd756ddeSShun Fu regs->int_drbl_int_mask = 0x3FFFFFFF; 2311bd756ddeSShun Fu regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout; 2312bd756ddeSShun Fu break; 2313bd756ddeSShun Fu default: 2314bd756ddeSShun Fu return -1; 2315bd756ddeSShun Fu break; 2316bd756ddeSShun Fu } 2317bd756ddeSShun Fu 2318bd756ddeSShun Fu return 0; 2319bd756ddeSShun Fu } 2320bd756ddeSShun Fu 2321f0c568a4SJianyun Li /** 2322f0c568a4SJianyun Li * mvumi_init_fw - Initializes the FW 2323f0c568a4SJianyun Li * @mhba: Adapter soft state 2324f0c568a4SJianyun Li * 2325f0c568a4SJianyun Li * This is the main function for initializing firmware. 2326f0c568a4SJianyun Li */ 2327f0c568a4SJianyun Li static int mvumi_init_fw(struct mvumi_hba *mhba) 2328f0c568a4SJianyun Li { 2329f0c568a4SJianyun Li int ret = 0; 2330f0c568a4SJianyun Li 2331f0c568a4SJianyun Li if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) { 2332f0c568a4SJianyun Li dev_err(&mhba->pdev->dev, "IO memory region busy!\n"); 2333f0c568a4SJianyun Li return -EBUSY; 2334f0c568a4SJianyun Li } 2335f0c568a4SJianyun Li ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); 2336f0c568a4SJianyun Li if (ret) 2337f0c568a4SJianyun Li goto fail_ioremap; 2338f0c568a4SJianyun Li 2339f0c568a4SJianyun Li switch (mhba->pdev->device) { 2340f0c568a4SJianyun Li case PCI_DEVICE_ID_MARVELL_MV9143: 2341bd756ddeSShun Fu mhba->instancet = &mvumi_instance_9143; 2342f0c568a4SJianyun Li mhba->io_seq = 0; 2343f0c568a4SJianyun Li mhba->max_sge = MVUMI_MAX_SG_ENTRY; 2344f0c568a4SJianyun Li mhba->request_id_enabled = 1; 2345f0c568a4SJianyun Li break; 2346bd756ddeSShun Fu case PCI_DEVICE_ID_MARVELL_MV9580: 2347bd756ddeSShun Fu mhba->instancet = &mvumi_instance_9580; 2348bd756ddeSShun Fu mhba->io_seq = 0; 2349bd756ddeSShun Fu mhba->max_sge = MVUMI_MAX_SG_ENTRY; 2350bd756ddeSShun Fu break; 2351f0c568a4SJianyun Li 
	default:
		dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
						mhba->pdev->device);
		mhba->instancet = NULL;
		ret = -EINVAL;
		goto fail_alloc_mem;
	}
	dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
							mhba->pdev->device);
	ret = mvumi_cfg_hw_reg(mhba);
	if (ret) {
		/* cfg_hw_reg may return -1 or -ENOMEM; normalize to -ENOMEM. */
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for reg\n");
		ret = -ENOMEM;
		goto fail_alloc_mem;
	}
	/* DMA-coherent page used for the firmware handshake protocol. */
	mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
			HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
	if (!mhba->handshake_page) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for handshake\n");
		ret = -ENOMEM;
		goto fail_alloc_page;
	}

	if (mvumi_start(mhba)) {
		ret = -EINVAL;
		goto fail_ready_state;
	}
	ret = mvumi_alloc_cmds(mhba);
	if (ret)
		goto fail_ready_state;

	return 0;

	/* Unwind in reverse order of acquisition. */
fail_ready_state:
	mvumi_release_mem_resource(mhba);
	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
		mhba->handshake_page, mhba->handshake_page_phys);
fail_alloc_page:
	kfree(mhba->regs);
fail_alloc_mem:
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
fail_ioremap:
	pci_release_regions(mhba->pdev);

	return ret;
}

/**
 * mvumi_io_attach -	Attaches this driver to SCSI mid-layer
 * @mhba:		Adapter soft state
 */
static int mvumi_io_attach(struct mvumi_hba *mhba)
{
	struct Scsi_Host *host = mhba->shost;
	struct scsi_device *sdev = NULL;
	int ret;
	/* SG entries that fit in an inbound frame after the message header. */
	unsigned int max_sg = (mhba->ib_max_size + 4 -
		sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);

	host->irq = mhba->pdev->irq;
	host->unique_id = mhba->unique_id;
	/* Reserve one slot; never advertise zero queue depth. */
	host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
	host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
	host->max_sectors = mhba->max_transfer_size / 512;
	host->cmd_per_lun = (mhba->max_io - 1) ?
(mhba->max_io - 1) : 1;
	host->max_id = mhba->max_target_id;
	host->max_cmd_len = MAX_COMMAND_SIZE;

	ret = scsi_add_host(host, &mhba->pdev->dev);
	if (ret) {
		dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
		return ret;
	}
	mhba->fw_flag |= MVUMI_FW_ATTACH;

	/* Serialize initial device registration against the rescan thread. */
	mutex_lock(&mhba->sas_discovery_mutex);
	/* On MV9580 register the virtual device at the last target id. */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
	else
		ret = 0;
	if (ret) {
		dev_err(&mhba->pdev->dev, "add virtual device failed\n");
		mutex_unlock(&mhba->sas_discovery_mutex);
		goto fail_add_device;
	}

	/* Background thread that rescans the bus on hotplug events. */
	mhba->dm_thread = kthread_create(mvumi_rescan_bus,
						mhba, "mvumi_scanthread");
	if (IS_ERR(mhba->dm_thread)) {
		dev_err(&mhba->pdev->dev,
			"failed to create device scan thread\n");
		mutex_unlock(&mhba->sas_discovery_mutex);
		goto fail_create_thread;
	}
	atomic_set(&mhba->pnp_count, 1);
	wake_up_process(mhba->dm_thread);

	mutex_unlock(&mhba->sas_discovery_mutex);
	return 0;

fail_create_thread:
	/* Undo the virtual-device registration done above (MV9580 only). */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		sdev = scsi_device_lookup(mhba->shost, 0,
						mhba->max_target_id - 1, 0);
	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}
fail_add_device:
	scsi_remove_host(mhba->shost);
	return ret;
}

/**
 * mvumi_probe_one -	PCI hotplug entry point
 * @pdev:		PCI device structure
 * @id:			PCI ids of supported hotplugged adapter
 */
static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct mvumi_hba *mhba;
	int ret;

	dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
			pdev->vendor, pdev->device, pdev->subsystem_vendor,
			pdev->subsystem_device);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* Enables bus mastering and configures the DMA mask. */
	ret = mvumi_pci_set_master(pdev);
	if (ret)
		goto fail_set_dma_mask;

	host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
	if (!host) {
		dev_err(&pdev->dev, "scsi_host_alloc failed\n");
		ret = -ENOMEM;
		goto fail_alloc_instance;
	}
	mhba = shost_priv(host);

	INIT_LIST_HEAD(&mhba->cmd_pool);
	INIT_LIST_HEAD(&mhba->ob_data_list);
	INIT_LIST_HEAD(&mhba->free_ob_list);
	INIT_LIST_HEAD(&mhba->res_list);
	INIT_LIST_HEAD(&mhba->waiting_req_list);
	mutex_init(&mhba->device_lock);
	INIT_LIST_HEAD(&mhba->mhba_dev_list);
	INIT_LIST_HEAD(&mhba->shost_dev_list);
	atomic_set(&mhba->fw_outstanding, 0);
	init_waitqueue_head(&mhba->int_cmd_wait_q);
	mutex_init(&mhba->sas_discovery_mutex);

	mhba->pdev = pdev;
	mhba->shost = host;
	mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;

	ret = mvumi_init_fw(mhba);
	if (ret)
		goto fail_init_fw;

	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
				"mvumi", mhba);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IRQ\n");
		goto fail_init_irq;
	}

	mhba->instancet->enable_intr(mhba);
	pci_set_drvdata(pdev, mhba);

	ret = mvumi_io_attach(mhba);
	if (ret)
		goto fail_io_attach;

	mvumi_backup_bar_addr(mhba);
	dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");

	return 0;

	/* Error unwinding: reverse order of the setup above. */
fail_io_attach:
	mhba->instancet->disable_intr(mhba);
	free_irq(mhba->pdev->irq, mhba);
fail_init_irq:
	mvumi_release_fw(mhba);
fail_init_fw:
	scsi_host_put(host);

fail_alloc_instance:
fail_set_dma_mask:
	pci_disable_device(pdev);

	return ret;
}

/* PCI remove entry point: tears down in reverse order of probe. */
static void mvumi_detach_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host;
	struct mvumi_hba *mhba;

	mhba = pci_get_drvdata(pdev);
	/* Stop the rescan thread before tearing anything else down. */
	if (mhba->dm_thread) {
		kthread_stop(mhba->dm_thread);
		mhba->dm_thread = NULL;
	}

	mvumi_detach_devices(mhba);
	host = mhba->shost;
	scsi_remove_host(mhba->shost);
	/* Ask the firmware to flush its cache before we pull the plug. */
	mvumi_flush_cache(mhba);

	mhba->instancet->disable_intr(mhba);
	free_irq(mhba->pdev->irq, mhba);
	mvumi_release_fw(mhba);
	scsi_host_put(host);
	pci_disable_device(pdev);
	dev_dbg(&pdev->dev, "driver is removed!\n");
}

/**
 * mvumi_shutdown -	Shutdown entry point
 * @device:		Generic device structure
 */
static void mvumi_shutdown(struct pci_dev *pdev)
{
	struct mvumi_hba *mhba = pci_get_drvdata(pdev);

	/* Flush the controller cache so no dirty data is lost on power-off. */
	mvumi_flush_cache(mhba);
}

static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct mvumi_hba *mhba = NULL;

	mhba =
pci_get_drvdata(pdev); 2591f0c568a4SJianyun Li mvumi_flush_cache(mhba); 2592f0c568a4SJianyun Li 2593f0c568a4SJianyun Li pci_set_drvdata(pdev, mhba); 2594bd756ddeSShun Fu mhba->instancet->disable_intr(mhba); 2595f0c568a4SJianyun Li free_irq(mhba->pdev->irq, mhba); 2596f0c568a4SJianyun Li mvumi_unmap_pci_addr(pdev, mhba->base_addr); 2597f0c568a4SJianyun Li pci_release_regions(pdev); 2598f0c568a4SJianyun Li pci_save_state(pdev); 2599f0c568a4SJianyun Li pci_disable_device(pdev); 2600f0c568a4SJianyun Li pci_set_power_state(pdev, pci_choose_state(pdev, state)); 2601f0c568a4SJianyun Li 2602f0c568a4SJianyun Li return 0; 2603f0c568a4SJianyun Li } 2604f0c568a4SJianyun Li 2605fddbeb80SArnd Bergmann static int __maybe_unused mvumi_resume(struct pci_dev *pdev) 2606f0c568a4SJianyun Li { 2607f0c568a4SJianyun Li int ret; 2608f0c568a4SJianyun Li struct mvumi_hba *mhba = NULL; 2609f0c568a4SJianyun Li 2610f0c568a4SJianyun Li mhba = pci_get_drvdata(pdev); 2611f0c568a4SJianyun Li 2612f0c568a4SJianyun Li pci_set_power_state(pdev, PCI_D0); 2613f0c568a4SJianyun Li pci_enable_wake(pdev, PCI_D0, 0); 2614f0c568a4SJianyun Li pci_restore_state(pdev); 2615f0c568a4SJianyun Li 2616f0c568a4SJianyun Li ret = pci_enable_device(pdev); 2617f0c568a4SJianyun Li if (ret) { 2618f0c568a4SJianyun Li dev_err(&pdev->dev, "enable device failed\n"); 2619f0c568a4SJianyun Li return ret; 2620f0c568a4SJianyun Li } 2621*ab8e7f4bSChristoph Hellwig 2622*ab8e7f4bSChristoph Hellwig ret = mvumi_pci_set_master(pdev); 2623f0c568a4SJianyun Li ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2624f0c568a4SJianyun Li if (ret) 2625f0c568a4SJianyun Li goto fail; 2626f0c568a4SJianyun Li ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME); 2627f0c568a4SJianyun Li if (ret) 2628f0c568a4SJianyun Li goto fail; 2629f0c568a4SJianyun Li ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); 2630f0c568a4SJianyun Li if (ret) 2631f0c568a4SJianyun Li goto release_regions; 2632f0c568a4SJianyun Li 2633bd756ddeSShun Fu if 
(mvumi_cfg_hw_reg(mhba)) { 2634bd756ddeSShun Fu ret = -EINVAL; 2635bd756ddeSShun Fu goto unmap_pci_addr; 2636bd756ddeSShun Fu } 2637bd756ddeSShun Fu 2638f0c568a4SJianyun Li mhba->mmio = mhba->base_addr[0]; 2639bd756ddeSShun Fu mvumi_reset(mhba); 2640f0c568a4SJianyun Li 2641f0c568a4SJianyun Li if (mvumi_start(mhba)) { 2642f0c568a4SJianyun Li ret = -EINVAL; 2643f0c568a4SJianyun Li goto unmap_pci_addr; 2644f0c568a4SJianyun Li } 2645f0c568a4SJianyun Li 2646f0c568a4SJianyun Li ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED, 2647f0c568a4SJianyun Li "mvumi", mhba); 2648f0c568a4SJianyun Li if (ret) { 2649f0c568a4SJianyun Li dev_err(&pdev->dev, "failed to register IRQ\n"); 2650f0c568a4SJianyun Li goto unmap_pci_addr; 2651f0c568a4SJianyun Li } 2652bd756ddeSShun Fu mhba->instancet->enable_intr(mhba); 2653f0c568a4SJianyun Li 2654f0c568a4SJianyun Li return 0; 2655f0c568a4SJianyun Li 2656f0c568a4SJianyun Li unmap_pci_addr: 2657f0c568a4SJianyun Li mvumi_unmap_pci_addr(pdev, mhba->base_addr); 2658f0c568a4SJianyun Li release_regions: 2659f0c568a4SJianyun Li pci_release_regions(pdev); 2660f0c568a4SJianyun Li fail: 2661f0c568a4SJianyun Li pci_disable_device(pdev); 2662f0c568a4SJianyun Li 2663f0c568a4SJianyun Li return ret; 2664f0c568a4SJianyun Li } 2665f0c568a4SJianyun Li 2666f0c568a4SJianyun Li static struct pci_driver mvumi_pci_driver = { 2667f0c568a4SJianyun Li 2668f0c568a4SJianyun Li .name = MV_DRIVER_NAME, 2669f0c568a4SJianyun Li .id_table = mvumi_pci_table, 2670f0c568a4SJianyun Li .probe = mvumi_probe_one, 26716f039790SGreg Kroah-Hartman .remove = mvumi_detach_one, 2672f0c568a4SJianyun Li .shutdown = mvumi_shutdown, 2673f0c568a4SJianyun Li #ifdef CONFIG_PM 2674f0c568a4SJianyun Li .suspend = mvumi_suspend, 2675f0c568a4SJianyun Li .resume = mvumi_resume, 2676f0c568a4SJianyun Li #endif 2677f0c568a4SJianyun Li }; 2678f0c568a4SJianyun Li 2679f9c25ccfSYueHaibing module_pci_driver(mvumi_pci_driver); 2680