/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include "htc_hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
#include "cfg80211.h"

struct ath6kl_sdio {
	struct sdio_func *func;

	spinlock_t lock;

	/* free list */
	struct list_head bus_req_freeq;

	/* available bus requests */
	struct bus_request bus_req[BUS_REQUEST_MAX_NUM];

	struct ath6kl *ar;
	u8 *dma_buffer;

	/* scatter request list head */
	struct list_head scat_req;

	spinlock_t scat_lock;
	bool is_disabled;
	atomic_t irq_handling;
	const struct sdio_device_id *id;
	struct work_struct wr_async_work;
	struct list_head wr_asyncq;
	spinlock_t wr_async_lock;
};

#define CMD53_ARG_READ          0
#define CMD53_ARG_WRITE         1
#define CMD53_ARG_BLOCK_BASIS   1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS  1

static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
	return ar->hif_priv;
}

/*
 * Check whether the DMA buffer is word-aligned and DMA-able.
 * Most host controllers assume the buffer is DMA'able and will
 * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid()
 * fails on stack memory.
 */
static inline bool buf_needs_bounce(u8 *buf)
{
	return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}

static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
	struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

	/* EP1 has an extended range */
	mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
	mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
	mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
	mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
	mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}

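/*
 * Pack a CMD53 (IO_RW_EXTENDED) argument as defined by the SDIO spec:
 * bit 31 = R/W flag, bits 30:28 = function number, bit 27 = block mode,
 * bit 26 = op code (fixed vs. incrementing address), bits 25:9 =
 * register address, bits 8:0 = byte/block count.
 */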
static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
					     u8 mode, u8 opcode, u32 addr,
					     u16 blksz)
{
	*arg = (((rw & 1) << 31) |
		((func & 0x7) << 28) |
		((mode & 1) << 27) |
		((opcode & 1) << 26) |
		((addr & 0x1FFFF) << 9) |
		(blksz & 0x1FF));
}

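/*
 * Pack a CMD52 (IO_RW_DIRECT) argument for a function 0 access:
 * bit 31 = R/W flag, bits 30:28 = function number, bit 27 = RAW
 * (read-after-write) flag, bits 25:9 = register address and
 * bits 7:0 = the byte to write. Bits 26 and 8 are also set by
 * this helper.
 */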
static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	const u8 func = 0;

	*arg = ((write & 1) << 31) |
	       ((func & 0x7) << 28) |
	       ((raw & 1) << 27) |
	       (1 << 26) |
	       ((address & 0x1FFFF) << 9) |
	       (1 << 8) |
	       (val & 0xFF);
}

static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char byte)
{
	struct mmc_command io_cmd;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
			  u8 *buf, u32 len)
{
	int ret = 0;

	if (request & HIF_WRITE) {
		/*
		 * FIXME: looks like an ugly workaround; the address is
		 * adjusted so that the last byte written lands on the
		 * mailbox EOM, as in ath6kl_sdio_scat_rw().
		 */
		if (addr >= HIF_MBOX_BASE_ADDR &&
		    addr <= HIF_MBOX_END_ADDR)
			addr += (HIF_MBOX_WIDTH - len);

		/* FIXME: same workaround for the extended mailbox range */
		if (addr == HIF_MBOX0_EXT_BASE_ADDR)
			addr += HIF_MBOX0_EXT_WIDTH - len;

		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_writesb(func, addr, buf, len);
		else
			ret = sdio_memcpy_toio(func, addr, buf, len);
	} else {
		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_readsb(func, buf, addr, len);
		else
			ret = sdio_memcpy_fromio(func, buf, addr, len);
	}

	ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
		   request & HIF_WRITE ? "wr" : "rd", addr,
		   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
	ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);

	return ret;
}

static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
	struct bus_request *bus_req;
	unsigned long flag;

	spin_lock_irqsave(&ar_sdio->lock, flag);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		spin_unlock_irqrestore(&ar_sdio->lock, flag);
		return NULL;
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   struct bus_request, list);
	list_del(&bus_req->list);

	spin_unlock_irqrestore(&ar_sdio->lock, flag);

	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
		   __func__, bus_req);

	return bus_req;
}

static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
				     struct bus_request *bus_req)
{
	unsigned long flag;

	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
		   __func__, bus_req);

	spin_lock_irqsave(&ar_sdio->lock, flag);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_irqrestore(&ar_sdio->lock, flag);
}

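/*
 * Translate a HIF scatter request into an mmc_data descriptor. The
 * transfer is block based, so scat_req->len is assumed to be a
 * multiple of HIF_MBOX_BLOCK_SIZE; each scatter entry becomes one
 * scatterlist segment.
 */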
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
					struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	data->blksz = HIF_MBOX_BLOCK_SIZE;
	data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d), (tot: %d, sg: %d)\n",
		   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
		   data->blksz, data->blocks, scat_req->len,
		   scat_req->scat_entries);

	data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
						    MMC_DATA_READ;

	/* fill SG entries */
	sg = scat_req->sgentries;
	sg_init_table(sg, scat_req->scat_entries);

	/* assemble SG list */
	for (i = 0; i < scat_req->scat_entries; i++, sg++) {
		ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
			   i, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);

		sg_set_buf(sg, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);
	}

	/* set scatter-gather table for request */
	data->sg = scat_req->sgentries;
	data->sg_len = scat_req->scat_entries;
}

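/*
 * Issue a single scatter request. Virtual-scatter requests go out as
 * one linear transfer from the pre-allocated bounce buffer; native
 * requests are sent as a raw CMD53 with the scatter-gather list
 * attached to the mmc_data.
 */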
static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
			       struct bus_request *req)
{
	struct mmc_request mmc_req;
	struct mmc_command cmd;
	struct mmc_data data;
	struct hif_scatter_req *scat_req;
	u8 opcode, rw;
	int status, len;

	scat_req = req->scat_req;

	if (scat_req->virt_scat) {
		len = scat_req->len;
		if (scat_req->req & HIF_BLOCK_BASIS)
			len = round_down(len, HIF_MBOX_BLOCK_SIZE);

		status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
					scat_req->addr, scat_req->virt_dma_buf,
					len);
		goto scat_complete;
	}

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	ath6kl_sdio_setup_scat_data(scat_req, &data);

	opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
		  CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

	rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;

	/* Fixup the address so that the last byte will fall on MBOX EOM */
	if (scat_req->req & HIF_WRITE) {
		if (scat_req->addr == HIF_MBOX_BASE_ADDR)
			scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
		else
			/* Uses extended address range */
			scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
	}

	/* set command argument */
	ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
				  CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
				  data.blocks);

	cmd.opcode = SD_IO_RW_EXTENDED;
	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmc_req.cmd = &cmd;
	mmc_req.data = &data;

	mmc_set_data_timeout(&data, ar_sdio->func->card);
	/* synchronous call to process request */
	mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

	status = cmd.error ? cmd.error : data.error;

scat_complete:
	scat_req->status = status;

	if (scat_req->status)
		ath6kl_err("Scatter request failed: %d\n",
			   scat_req->status);

	if (scat_req->req & HIF_ASYNCHRONOUS)
		scat_req->complete(ar_sdio->ar->htc_target, scat_req);

	return status;
}

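/*
 * Pre-allocate n_scat_req scatter requests. With virt_scat each
 * request gets a linear bounce buffer (over-allocated so that it can
 * be L1-cache aligned); otherwise each request gets an array of
 * n_scat_entry scatterlist entries.
 */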
static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
					   int n_scat_entry, int n_scat_req,
					   bool virt_scat)
{
	struct hif_scatter_req *s_req;
	struct bus_request *bus_req;
	int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
	u8 *virt_buf;

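	/*
	 * struct hif_scatter_req appears to embed one hif_scatter_item
	 * in its scat_list[], hence only n_scat_entry - 1 extra items
	 * are accounted for here.
	 */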
	scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
	scat_req_sz = sizeof(*s_req) + scat_list_sz;

	if (!virt_scat)
		sg_sz = sizeof(struct scatterlist) * n_scat_entry;
	else
		buf_sz = 2 * L1_CACHE_BYTES +
			 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;

	for (i = 0; i < n_scat_req; i++) {
		/* allocate the scatter request */
		s_req = kzalloc(scat_req_sz, GFP_KERNEL);
		if (!s_req)
			return -ENOMEM;

		if (virt_scat) {
			virt_buf = kzalloc(buf_sz, GFP_KERNEL);
			if (!virt_buf) {
				kfree(s_req);
				return -ENOMEM;
			}

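			/*
			 * Note: the aligned pointer, not virt_buf itself,
			 * is what later gets kfree()'d; this relies on
			 * kmalloc memory being L1-cache aligned so that
			 * L1_CACHE_ALIGN() is effectively a no-op.
			 */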
			s_req->virt_dma_buf =
				(u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
		} else {
			/* allocate sglist */
			s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);
			if (!s_req->sgentries) {
				kfree(s_req);
				return -ENOMEM;
			}
		}

		/* allocate a bus request for this scatter request */
		bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
		if (!bus_req) {
			kfree(s_req->sgentries);
			kfree(s_req->virt_dma_buf);
			kfree(s_req);
			return -ENOMEM;
		}

		/* assign the scatter request to this bus request */
		bus_req->scat_req = s_req;
		s_req->busrequest = bus_req;

		s_req->virt_scat = virt_scat;

		/* add it to the scatter pool */
		hif_scatter_req_add(ar_sdio->ar, s_req);
	}

	return 0;
}

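/*
 * Synchronous read/write. Buffers that are unaligned or not DMA-able
 * (e.g. on the stack) are bounced through the pre-allocated
 * ar_sdio->dma_buffer; len is assumed to fit within
 * HIF_DMA_BUFFER_SIZE, as it is not re-checked here.
 */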
static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
				       u32 len, u32 request)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u8 *tbuf = NULL;
	int ret;
	bool bounced = false;

	if (request & HIF_BLOCK_BASIS)
		len = round_down(len, HIF_MBOX_BLOCK_SIZE);

	if (buf_needs_bounce(buf)) {
		if (!ar_sdio->dma_buffer)
			return -ENOMEM;
		tbuf = ar_sdio->dma_buffer;
		memcpy(tbuf, buf, len);
		bounced = true;
	} else {
		tbuf = buf;
	}

	sdio_claim_host(ar_sdio->func);
	ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
	if ((request & HIF_READ) && bounced)
		memcpy(buf, tbuf, len);
	sdio_release_host(ar_sdio->func);

	return ret;
}

static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
				      struct bus_request *req)
{
	if (req->scat_req) {
		ath6kl_sdio_scat_rw(ar_sdio, req);
	} else {
		void *context;
		int status;

		status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
						     req->buffer, req->length,
						     req->request);
		context = req->packet;
		ath6kl_sdio_free_bus_req(ar_sdio, req);
		ath6kldev_rw_comp_handler(context, status);
	}
}

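/*
 * Drain the asynchronous write queue. The queue lock is dropped
 * around each request since the actual transfer may sleep inside the
 * mmc layer.
 */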
static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
	struct ath6kl_sdio *ar_sdio;
	unsigned long flags;
	struct bus_request *req, *tmp_req;

	ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
	sdio_claim_host(ar_sdio->func);

	spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
		__ath6kl_sdio_write_async(ar_sdio, req);
		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
	}
	spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);

	sdio_release_host(ar_sdio->func);
}

static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
	int status;
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");

	ar_sdio = sdio_get_drvdata(func);
	atomic_set(&ar_sdio->irq_handling, 1);

	/*
	 * Release the host during interrupts so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	status = ath6kldev_intr_bh_handler(ar_sdio->ar);
	sdio_claim_host(ar_sdio->func);
	atomic_set(&ar_sdio->irq_handling, 0);
	WARN_ON(status && status != -ECANCELED);
}

static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
{
	struct sdio_func *func = ar_sdio->func;
	int ret = 0;

	if (!ar_sdio->is_disabled)
		return 0;

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath6kl_err("Unable to enable sdio func: %d\n", ret);
		sdio_release_host(func);
		return ret;
	}

	sdio_release_host(func);

	/*
	 * Wait for hardware to initialise. It should take a lot less than
	 * 10 ms but let's be conservative here.
	 */
	msleep(10);

	ar_sdio->is_disabled = false;

	return ret;
}

static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
{
	int ret;

	if (ar_sdio->is_disabled)
		return 0;

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);
	ret = sdio_disable_func(ar_sdio->func);
	sdio_release_host(ar_sdio->func);

	if (ret)
		return ret;

	ar_sdio->is_disabled = true;

	return ret;
}

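/*
 * Queue an asynchronous write. The transfer itself happens later in
 * wr_async_work context, so this path does not sleep.
 */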
static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
				   u32 length, u32 request,
				   struct htc_packet *packet)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *bus_req;
	unsigned long flags;

	bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
	if (!bus_req)
		return -ENOMEM;

	bus_req->address = address;
	bus_req->buffer = buffer;
	bus_req->length = length;
	bus_req->request = request;
	bus_req->packet = packet;

	spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
	queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);

	return 0;
}

static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	/* Register the isr */
	ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
	if (ret)
		ath6kl_err("Failed to claim sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}

static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	/* wait for any in-flight interrupt handling to complete */
	while (atomic_read(&ar_sdio->irq_handling)) {
		sdio_release_host(ar_sdio->func);
		schedule_timeout_interruptible(HZ / 10);
		sdio_claim_host(ar_sdio->func);
	}

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath6kl_err("Failed to release sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}

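/* Pop a free scatter request from the pool, or return NULL if empty. */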
580 
581 static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
582 {
583 	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
584 	struct hif_scatter_req *node = NULL;
585 	unsigned long flag;
586 
587 	spin_lock_irqsave(&ar_sdio->scat_lock, flag);
588 
589 	if (!list_empty(&ar_sdio->scat_req)) {
590 		node = list_first_entry(&ar_sdio->scat_req,
591 					struct hif_scatter_req, list);
592 		list_del(&node->list);
593 	}
594 
595 	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
596 
597 	return node;
598 }
599 
600 static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
601 					struct hif_scatter_req *s_req)
602 {
603 	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
604 	unsigned long flag;
605 
606 	spin_lock_irqsave(&ar_sdio->scat_lock, flag);
607 
608 	list_add_tail(&s_req->list, &ar_sdio->scat_req);
609 
610 	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
611 
612 }
613 
/* scatter gather read write request */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
					struct hif_scatter_req *scat_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u32 request = scat_req->req;
	int status = 0;
	unsigned long flags;

	if (!scat_req->len)
		return -EINVAL;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: total len: %d scatter entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (request & HIF_SYNCHRONOUS) {
		sdio_claim_host(ar_sdio->func);
		status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
		sdio_release_host(ar_sdio->func);
	} else {
		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
		list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
	}

	return status;
}

/* clean up scatter support */
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *s_req, *tmp_req;
	unsigned long flag;

	/* empty the free list */
	spin_lock_irqsave(&ar_sdio->scat_lock, flag);
	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
		list_del(&s_req->list);
		spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);

		if (s_req->busrequest)
			ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
		kfree(s_req->virt_dma_buf);
		kfree(s_req->sgentries);
		kfree(s_req);

		spin_lock_irqsave(&ar_sdio->scat_lock, flag);
	}
	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
}

/* setup of HIF scatter resources */
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct htc_target *target = ar->htc_target;
	int ret = 0;
	bool virt_scat = false;

	/* check if host supports scatter and it meets our requirements */
	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
		ath6kl_err("host only supports scatter of %d entries, need: %d\n",
			   ar_sdio->func->card->host->max_segs,
			   MAX_SCATTER_ENTRIES_PER_REQ);
		virt_scat = true;
	}

	if (!virt_scat) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				MAX_SCATTER_ENTRIES_PER_REQ,
				MAX_SCATTER_REQUESTS, virt_scat);

		if (!ret) {
			ath6kl_dbg(ATH6KL_DBG_SCATTER,
				   "hif-scatter enabled: max scatter req: %d entries: %d\n",
				   MAX_SCATTER_REQUESTS,
				   MAX_SCATTER_ENTRIES_PER_REQ);

			target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
			target->max_xfer_szper_scatreq =
						MAX_SCATTER_REQ_TRANSFER_SIZE;
		} else {
			ath6kl_sdio_cleanup_scatter(ar);
			ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
		}
	}

	if (virt_scat || ret) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				ATH6KL_SCATTER_ENTRIES_PER_REQ,
				ATH6KL_SCATTER_REQS, virt_scat);

		if (ret) {
			ath6kl_err("failed to alloc virtual scatter resources!\n");
			ath6kl_sdio_cleanup_scatter(ar);
			return ret;
		}

		ath6kl_dbg(ATH6KL_DBG_SCATTER,
			   "Virtual scatter enabled, max_scat_req:%d, entries:%d\n",
			   ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);

		target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
		target->max_xfer_szper_scatreq =
					ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
	}

	return 0;
}

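/*
 * Suspend requires the host to keep the card powered
 * (MMC_PM_KEEP_POWER) so that firmware state survives; otherwise the
 * suspend attempt is rejected.
 */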
static int ath6kl_sdio_suspend(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	mmc_pm_flag_t flags;
	int ret;

	flags = sdio_get_host_pm_caps(func);

	if (!(flags & MMC_PM_KEEP_POWER)) {
		/* as the host doesn't support keep power, bail out */
		ath6kl_dbg(ATH6KL_DBG_SDIO,
			   "func %d doesn't support MMC_PM_KEEP_POWER\n",
			   func->num);
		return -EINVAL;
	}

	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	if (ret) {
		ath6kl_err("set sdio pm flags failed: %d\n", ret);
		return ret;
	}

	ath6kl_deep_sleep_enable(ar);

	return 0;
}

static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
	.read_write_sync = ath6kl_sdio_read_write_sync,
	.write_async = ath6kl_sdio_write_async,
	.irq_enable = ath6kl_sdio_irq_enable,
	.irq_disable = ath6kl_sdio_irq_disable,
	.scatter_req_get = ath6kl_sdio_scatter_req_get,
	.scatter_req_add = ath6kl_sdio_scatter_req_add,
	.enable_scatter = ath6kl_sdio_enable_scatter,
	.scat_req_rw = ath6kl_sdio_async_rw_scatter,
	.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
	.suspend = ath6kl_sdio_suspend,
};

static int ath6kl_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	int ret;
	struct ath6kl_sdio *ar_sdio;
	struct ath6kl *ar;
	int count;

	ath6kl_dbg(ATH6KL_DBG_SDIO,
		   "new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
	if (!ar_sdio)
		return -ENOMEM;

	ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!ar_sdio->dma_buffer) {
		ret = -ENOMEM;
		goto err_hif;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->id = id;
	ar_sdio->is_disabled = true;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->scat_lock);
	spin_lock_init(&ar_sdio->wr_async_lock);

	INIT_LIST_HEAD(&ar_sdio->scat_req);
	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);

	for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
		ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);

	ar = ath6kl_core_alloc(&ar_sdio->func->dev);
	if (!ar) {
		ath6kl_err("Failed to alloc ath6kl core\n");
		ret = -ENOMEM;
		goto err_dma;
	}

	ar_sdio->ar = ar;
	ar->hif_priv = ar_sdio;
	ar->hif_ops = &ath6kl_sdio_ops;

	ath6kl_sdio_set_mbox_info(ar);

	sdio_claim_host(func);

	if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
	    MANUFACTURER_ID_AR6003_BASE) {
		/* enable 4-bit ASYNC interrupt on AR6003 or later */
		ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
						CCCR_SDIO_IRQ_MODE_REG,
						SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
		if (ret) {
			ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
				   ret);
			sdio_release_host(func);
			goto err_cfg80211;
		}

		ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n");
	}

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	sdio_release_host(func);

	ret = ath6kl_sdio_power_on(ar_sdio);
	if (ret)
		goto err_cfg80211;

	sdio_claim_host(func);

	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
	if (ret) {
		ath6kl_err("Set sdio block size %d failed: %d\n",
			   HIF_MBOX_BLOCK_SIZE, ret);
		sdio_release_host(func);
		goto err_off;
	}

	sdio_release_host(func);

	ret = ath6kl_core_init(ar);
	if (ret) {
		ath6kl_err("Failed to init ath6kl core\n");
		goto err_off;
	}

	return ret;

err_off:
	ath6kl_sdio_power_off(ar_sdio);
err_cfg80211:
	ath6kl_cfg80211_deinit(ar_sdio->ar);
err_dma:
	kfree(ar_sdio->dma_buffer);
err_hif:
	kfree(ar_sdio);

	return ret;
}

static void ath6kl_sdio_remove(struct sdio_func *func)
{
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_SDIO,
		   "removed func %d vendor 0x%x device 0x%x\n",
		   func->num, func->vendor, func->device);

	ar_sdio = sdio_get_drvdata(func);

	ath6kl_stop_txrx(ar_sdio->ar);
	cancel_work_sync(&ar_sdio->wr_async_work);

	ath6kl_unavail_ev(ar_sdio->ar);

	ath6kl_sdio_power_off(ar_sdio);

	kfree(ar_sdio->dma_buffer);
	kfree(ar_sdio);
}

static const struct sdio_device_id ath6kl_sdio_devices[] = {
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);

static struct sdio_driver ath6kl_sdio_driver = {
	.name = "ath6kl_sdio",
	.id_table = ath6kl_sdio_devices,
	.probe = ath6kl_sdio_probe,
	.remove = ath6kl_sdio_remove,
};

static int __init ath6kl_sdio_init(void)
{
	int ret;

	ret = sdio_register_driver(&ath6kl_sdio_driver);
	if (ret)
		ath6kl_err("sdio driver registration failed: %d\n", ret);

	return ret;
}

static void __exit ath6kl_sdio_exit(void)
{
	sdio_unregister_driver(&ath6kl_sdio_driver);
}

module_init(ath6kl_sdio_init);
module_exit(ath6kl_sdio_exit);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);