/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  comminit.c
 *
 * Abstract: This supports the initialization of the host adapter communication interface.
 *    This is a platform dependent module for the pci cyclone board.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/mm.h>
#include <scsi/scsi_host.h>
#include <asm/semaphore.h>

#include "aacraid.h"

struct aac_common aac_config;

static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
{
	unsigned char *base;
	unsigned long size, align;
	const unsigned long fibsize = 4096;
	const unsigned long printfbufsiz = 256;
	struct aac_init *init;
	dma_addr_t phys;

	size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;

	base = pci_alloc_consistent(dev->pdev, size, &phys);

	if (base == NULL) {
		printk(KERN_ERR "aacraid: unable to create mapping.\n");
		return 0;
	}
	dev->comm_addr = (void *)base;
	dev->comm_phys = phys;
	dev->comm_size = size;

	dev->init = (struct aac_init *)(base + fibsize);
	dev->init_pa = phys + fibsize;

	init = dev->init;

	init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
	if (dev->max_fib_size != sizeof(struct hw_fib))
		init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
	init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION);
	init->fsrev = cpu_to_le32(dev->fsrev);

	/*
	 *	Adapter Fibs are the first thing allocated so that they
	 *	start page aligned
	 */
	dev->aif_base_va = (struct hw_fib *)base;

	init->AdapterFibsVirtualAddress = 0;
	init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
	init->AdapterFibsSize = cpu_to_le32(fibsize);
	init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
	/*
	 *	Number of 4k pages of host physical memory.  The aacraid fw
	 *	needs this number to be less than 4gb worth of pages.
	 *	num_physpages is in system page units.  New firmware doesn't
	 *	have any issues with the mapping system, but older firmware did,
	 *	and had *troubles* dealing with the math overloading past
	 *	32 bits, thus we must limit this field.
	 *
	 *	This assumes the memory is mapped zero->n, which isn't always
	 *	true on real computers.  It also has some slight problems with
	 *	the GART on x86-64.  I've btw never tried DMA from PCI space
	 *	on this platform but don't be surprised if it's problematic.
	 */
#ifndef CONFIG_GART_IOMMU
	if ((num_physpages << (PAGE_SHIFT - 12)) <= AAC_MAX_HOSTPHYSMEMPAGES) {
		init->HostPhysMemPages =
			cpu_to_le32(num_physpages << (PAGE_SHIFT - 12));
	} else
#endif
	{
		init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
	}

	init->InitFlags = 0;
	init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
	init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
	init->MaxFibSize = cpu_to_le32(dev->max_fib_size);

	/*
	 *	Increment the base address by the amount already used
	 */
	base = base + fibsize + sizeof(struct aac_init);
	phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init));
	/*
	 *	Align the beginning of Headers to commalign
	 */
	align = (commalign - ((unsigned long)(base) & (commalign - 1)));
	base = base + align;
	phys = phys + align;
	/*
	 *	Fill in addresses of the Comm Area Headers and Queues
	 */
	*commaddr = base;
	init->CommHeaderAddress = cpu_to_le32((u32)phys);
	/*
	 *	Increment the base address by the size of the CommArea
	 */
	base = base + commsize;
	phys = phys + commsize;
	/*
	 *	Place the Printf buffer area after the Fast I/O comm area.
	 */
	dev->printfbuf = (void *)base;
	init->printfbuf = cpu_to_le32(phys);
	init->printfbufsiz = cpu_to_le32(printfbufsiz);
	memset(base, 0, printfbufsiz);
	return 1;
}
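
/**
 *	aac_queue_init	-	initialise one communication queue
 *	@dev: Adapter the queue belongs to
 *	@q: Queue to initialise
 *	@mem: This queue's producer index inside the comm header area; the
 *	      consumer index immediately follows it
 *	@qsize: Number of entries in the queue
 *
 *	Sets up the host side bookkeeping for a single queue and points its
 *	producer and consumer indexes at the shared header area.  Both
 *	indexes are initialised to @qsize.
 */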
static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
{
	q->numpending = 0;
	q->dev = dev;
	INIT_LIST_HEAD(&q->pendingq);
	init_waitqueue_head(&q->cmdready);
	INIT_LIST_HEAD(&q->cmdq);
	init_waitqueue_head(&q->qfull);
	spin_lock_init(&q->lockdata);
	q->lock = &q->lockdata;
	q->headers.producer = (__le32 *)mem;
	q->headers.consumer = (__le32 *)(mem+1);
	*(q->headers.producer) = cpu_to_le32(qsize);
	*(q->headers.consumer) = cpu_to_le32(qsize);
	q->entries = qsize;
}

/**
 *	aac_send_shutdown	-	shutdown an adapter
 *	@dev: Adapter to shutdown
 *
 *	This routine will send a VM_CloseAll (shutdown) request to the adapter.
 */

int aac_send_shutdown(struct aac_dev * dev)
{
	struct fib * fibctx;
	struct aac_close *cmd;
	int status;

	fibctx = fib_alloc(dev);
	if (!fibctx)
		return -ENOMEM;
	fib_init(fibctx);

	cmd = (struct aac_close *) fib_data(fibctx);

	cmd->command = cpu_to_le32(VM_CloseAll);
	cmd->cid = cpu_to_le32(0xffffffff);

	status = fib_send(ContainerCommand,
			  fibctx,
			  sizeof(struct aac_close),
			  FsaNormal,
			  1, 1,
			  NULL, NULL);

	if (status == 0)
		fib_complete(fibctx);
	fib_free(fibctx);
	return status;
}
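
/*
 *	aac_send_shutdown() above follows the usual pattern used in this
 *	driver for a synchronous adapter command: fib_alloc(), fib_init(),
 *	fill in the request data, fib_send() with wait set, fib_complete()
 *	on success, and fib_free() in all cases.
 */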
/**
 *	aac_comm_init	-	Initialise FSA data structures
 *	@dev: Adapter to initialise
 *
 *	Initializes the data structures that are required for the FSA communication
 *	interface to operate.
 *	Returns
 *		0 - if we were able to init the communication interface.
 *		-ENOMEM - if the communication area could not be allocated.
 *		This is a fatal error.
 */

static int aac_comm_init(struct aac_dev * dev)
{
	unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
	unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
	u32 *headers;
	struct aac_entry * queues;
	unsigned long size;
	struct aac_queue_block * comm = dev->queues;
	/*
	 *	Now allocate and initialize the zone structures used as our
	 *	pool of FIB context records.  The size of the zone is based
	 *	on the system memory size.  We also initialize the mutex used
	 *	to protect the zone.
	 */
	spin_lock_init(&dev->fib_lock);

	/*
	 *	Allocate the physically contiguous space for the communication
	 *	queue headers.
	 */

	size = hdrsize + queuesize;

	if (!aac_alloc_comm(dev, (void **)&headers, size, QUEUE_ALIGNMENT))
		return -ENOMEM;

	queues = (struct aac_entry *)(((ulong)headers) + hdrsize);

	/* Adapter to Host normal priority command queue */
	comm->queue[HostNormCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
	queues += HOST_NORM_CMD_ENTRIES;
	headers += 2;

	/* Adapter to Host high priority command queue */
	comm->queue[HostHighCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
	queues += HOST_HIGH_CMD_ENTRIES;
	headers += 2;

	/* Host to adapter normal priority command queue */
	comm->queue[AdapNormCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
	queues += ADAP_NORM_CMD_ENTRIES;
	headers += 2;

	/* Host to adapter high priority command queue */
	comm->queue[AdapHighCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
	queues += ADAP_HIGH_CMD_ENTRIES;
	headers += 2;

	/* Adapter to host normal priority response queue */
	comm->queue[HostNormRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
	queues += HOST_NORM_RESP_ENTRIES;
	headers += 2;

	/* Adapter to host high priority response queue */
	comm->queue[HostHighRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
	queues += HOST_HIGH_RESP_ENTRIES;
	headers += 2;

	/* Host to adapter normal priority response queue */
	comm->queue[AdapNormRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
	queues += ADAP_NORM_RESP_ENTRIES;
	headers += 2;

	/* Host to adapter high priority response queue */
	comm->queue[AdapHighRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);

	comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
	comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
	comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
	comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;

	return 0;
}
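
/*
 *	Layout note on the comm area built above: each queue owns a
 *	producer/consumer index pair at the front of the area (which is
 *	why the headers pointer advances by two u32s per queue), and the
 *	queue entries are packed back to back immediately after the header
 *	block.  The adapter side queues do not use their own lockdata:
 *	each adapter command queue is aliased to the lock of the matching
 *	host response queue, and each adapter response queue to the lock
 *	of the matching host command queue.
 */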
struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
	u32 status[5];
	struct Scsi_Host * host = dev->scsi_host_ptr;

	/*
	 *	Check the preferred comm settings, defaults from template.
	 */
	dev->max_fib_size = sizeof(struct hw_fib);
	dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
		- sizeof(struct aac_fibhdr)
		- sizeof(struct aac_write) + sizeof(struct sgmap))
			/ sizeof(struct sgmap);
	if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
			0, 0, 0, 0, 0, 0,
			status+0, status+1, status+2, status+3, status+4))
	    && (status[0] == 0x00000001)) {
		/*
		 *	status[1] >> 16		maximum command size in KB
		 *	status[1] & 0xFFFF	maximum FIB size
		 *	status[2] >> 16		maximum SG elements to driver
		 *	status[2] & 0xFFFF	maximum SG elements from driver
		 *	status[3] & 0xFFFF	maximum number FIBs outstanding
		 */
		host->max_sectors = (status[1] >> 16) << 1;
		dev->max_fib_size = status[1] & 0xFFFF;
		host->sg_tablesize = status[2] >> 16;
		dev->sg_tablesize = status[2] & 0xFFFF;
		host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
		/*
		 *	NOTE:
		 *	All these overrides are based on a fixed internal
		 *	knowledge and understanding of existing adapters,
		 *	acbsize should be set with caution.
		 */
		if (acbsize == 512) {
			host->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
			dev->max_fib_size = 512;
			dev->sg_tablesize = host->sg_tablesize
				= (512 - sizeof(struct aac_fibhdr)
				   - sizeof(struct aac_write) + sizeof(struct sgmap))
					/ sizeof(struct sgmap);
			host->can_queue = AAC_NUM_IO_FIB;
		} else if (acbsize == 2048) {
			host->max_sectors = 512;
			dev->max_fib_size = 2048;
			host->sg_tablesize = 65;
			dev->sg_tablesize = 81;
			host->can_queue = 512 - AAC_NUM_MGT_FIB;
		} else if (acbsize == 4096) {
			host->max_sectors = 1024;
			dev->max_fib_size = 4096;
			host->sg_tablesize = 129;
			dev->sg_tablesize = 166;
			host->can_queue = 256 - AAC_NUM_MGT_FIB;
		} else if (acbsize == 8192) {
			host->max_sectors = 2048;
			dev->max_fib_size = 8192;
			host->sg_tablesize = 257;
			dev->sg_tablesize = 337;
			host->can_queue = 128 - AAC_NUM_MGT_FIB;
		} else if (acbsize > 0) {
			printk("Illegal acbsize=%d ignored\n", acbsize);
		}
	}

	if (numacb > 0) {
		if (numacb < host->can_queue)
			host->can_queue = numacb;
		else
			printk("numacb=%d ignored\n", numacb);
	}

	/*
	 *	Ok now init the communication subsystem
	 */

	dev->queues = kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
	if (dev->queues == NULL) {
		printk(KERN_ERR "Error could not allocate comm region.\n");
		return NULL;
	}
	memset(dev->queues, 0, sizeof(struct aac_queue_block));

	if (aac_comm_init(dev) < 0) {
		kfree(dev->queues);
		return NULL;
	}
	/*
	 *	Initialize the list of fibs
	 */
	if (fib_setup(dev) < 0) {
		kfree(dev->queues);
		return NULL;
	}

	INIT_LIST_HEAD(&dev->fib_list);
	init_completion(&dev->aif_completion);

	return dev;
}
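
/*
 *	Usage sketch, illustrative only: the real call sites live in the
 *	driver's platform specific init and teardown paths, not in this
 *	file, and the surrounding code below is an assumption, not part of
 *	this module.
 *
 *		aac = aac_init_adapter(aac);
 *		if (!aac)
 *			return -ENODEV;		comm or fib setup failed
 *		...
 *		aac_send_shutdown(aac);		orderly shutdown on teardown
 */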