// SPDX-License-Identifier: GPL-2.0-only

/*
 * Linux device driver for PCI based Prism54
 *
 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
 * Copyright (c) 2008, Christian Lamparter <chunkeey@web.de>
 *
 * Based on the islsm (softmac prism54) driver, which is:
 * Copyright 2004-2006 Jean-Baptiste Note <jean-baptiste.note@m4x.org>, et al.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <net/mac80211.h>

#include "p54.h"
#include "lmac.h"
#include "p54pci.h"

MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
MODULE_DESCRIPTION("Prism54 PCI wireless driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54pci");
MODULE_FIRMWARE("isl3886pci");

static const struct pci_device_id p54p_table[] = {
	/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3890) },
	/* 3COM 3CRWE154G72 Wireless LAN adapter */
	{ PCI_DEVICE(0x10b7, 0x6001) },
	/* Intersil PRISM Indigo Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3877) },
	/* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3886) },
	/* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
	{ PCI_DEVICE(0x1260, 0xffff) },
	{ },
};

MODULE_DEVICE_TABLE(pci, p54p_table);

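/*
 * Hold the device in reset, stream the LM86 firmware image into device
 * memory 4 KiB at a time through the direct memory window, then release
 * the reset with RAMBOOT set so the core boots from the uploaded image.
 */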
static int p54p_upload_firmware(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	__le32 reg;
	int err;
	__le32 *data;
	u32 remains, left, device_addr;

	P54P_WRITE(int_enable, cpu_to_le32(0));
	P54P_READ(int_enable);
	udelay(10);

	reg = P54P_READ(ctrl_stat);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT);
	P54P_WRITE(ctrl_stat, reg);
	P54P_READ(ctrl_stat);
	udelay(10);

	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();

	/* wait for the firmware to reset properly */
	mdelay(10);

	err = p54_parse_firmware(dev, priv->firmware);
	if (err)
		return err;

	if (priv->common.fw_interface != FW_LM86) {
		dev_err(&priv->pdev->dev, "wrong firmware, please get an "
			"LM86(PCI) firmware and try again.\n");
		return -EINVAL;
	}

	data = (__le32 *) priv->firmware->data;
	remains = priv->firmware->size;
	device_addr = ISL38XX_DEV_FIRMWARE_ADDR;
	while (remains) {
		u32 i = 0;
		left = min((u32)0x1000, remains);
		P54P_WRITE(direct_mem_base, cpu_to_le32(device_addr));
		P54P_READ(int_enable);

		device_addr += 0x1000;
		while (i < left) {
			P54P_WRITE(direct_mem_win[i], *data++);
			i += sizeof(u32);
		}

		remains -= left;
		P54P_READ(int_enable);
	}

	reg = P54P_READ(ctrl_stat);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT);
	P54P_WRITE(ctrl_stat, reg);
	P54P_READ(ctrl_stat);
	udelay(10);

	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	/* wait for the firmware to boot properly */
	mdelay(100);

	return 0;
}

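/*
 * Top up an RX ring: allocate and DMA-map a fresh skb for every free
 * descriptor between the device and host indices, then publish the new
 * host index so the firmware can use the buffers.
 */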
static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf, u32 index)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	u32 limit, idx, i;

	idx = le32_to_cpu(ring_control->host_idx[ring_index]);
	limit = idx;
	limit -= index;
	limit = ring_limit - limit;

	i = idx % ring_limit;
	while (limit-- > 1) {
		struct p54p_desc *desc = &ring[i];

		if (!desc->host_addr) {
			struct sk_buff *skb;
			dma_addr_t mapping;
			skb = dev_alloc_skb(priv->common.rx_mtu + 32);
			if (!skb)
				break;

			mapping = dma_map_single(&priv->pdev->dev,
						 skb_tail_pointer(skb),
						 priv->common.rx_mtu + 32,
						 DMA_FROM_DEVICE);

			if (dma_mapping_error(&priv->pdev->dev, mapping)) {
				dev_kfree_skb_any(skb);
				dev_err(&priv->pdev->dev,
					"RX DMA Mapping error\n");
				break;
			}

			desc->host_addr = cpu_to_le32(mapping);
			desc->device_addr = 0;	/* FIXME: necessary? */
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
			desc->flags = 0;
			rx_buf[i] = skb;
		}

		i++;
		idx++;
		i %= ring_limit;
	}

	wmb();
	ring_control->host_idx[ring_index] = cpu_to_le32(idx);
}

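/*
 * Process frames the device has completed on an RX ring: sync each
 * buffer for the CPU and hand it to p54_rx(). Buffers that the stack
 * keeps are unmapped and replaced by the refill pass; rejected ones
 * are re-armed in place.
 */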
static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;
	while (i != idx) {
		u16 len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		desc = &ring[i];
		len = le16_to_cpu(desc->len);
		skb = rx_buf[i];

		if (!skb) {
			i++;
			i %= ring_limit;
			continue;
		}

		if (unlikely(len > priv->common.rx_mtu)) {
			if (net_ratelimit())
				dev_err(&priv->pdev->dev, "rx'd frame size "
					"exceeds length threshold.\n");

			len = priv->common.rx_mtu;
		}
		dma_addr = le32_to_cpu(desc->host_addr);
		dma_sync_single_for_cpu(&priv->pdev->dev, dma_addr,
					priv->common.rx_mtu + 32,
					DMA_FROM_DEVICE);
		skb_put(skb, len);

		if (p54_rx(dev, skb)) {
			dma_unmap_single(&priv->pdev->dev, dma_addr,
					 priv->common.rx_mtu + 32,
					 DMA_FROM_DEVICE);
			rx_buf[i] = NULL;
			desc->host_addr = cpu_to_le32(0);
		} else {
			skb_trim(skb, 0);
			dma_sync_single_for_device(&priv->pdev->dev, dma_addr,
						   priv->common.rx_mtu + 32,
						   DMA_FROM_DEVICE);
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
		}

		i++;
		i %= ring_limit;
	}

	p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
}

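/*
 * Reclaim completed TX descriptors: unmap the DMA buffer, clear the
 * descriptor and free the skb if the LMAC layer marked it for release
 * after transmission.
 */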
static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **tx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	struct sk_buff *skb;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;

	while (i != idx) {
		desc = &ring[i];

		skb = tx_buf[i];
		tx_buf[i] = NULL;

		dma_unmap_single(&priv->pdev->dev,
				 le32_to_cpu(desc->host_addr),
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);

		desc->host_addr = 0;
		desc->device_addr = 0;
		desc->len = 0;
		desc->flags = 0;

		if (skb && FREE_AFTER_TX(skb))
			p54_free_skb(dev, skb);

		i++;
		i %= ring_limit;
	}
}

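/*
 * Bottom half of the interrupt handler: service both TX rings and both
 * RX rings, then tell the device that the ring control block has been
 * updated.
 */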
static void p54p_tasklet(struct tasklet_struct *t)
{
	struct p54p_priv *priv = from_tasklet(priv, t, tasklet);
	struct ieee80211_hw *dev = pci_get_drvdata(priv->pdev);
	struct p54p_ring_control *ring_control = priv->ring_control;

	p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
			   ARRAY_SIZE(ring_control->tx_mgmt),
			   priv->tx_buf_mgmt);

	p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
			   ARRAY_SIZE(ring_control->tx_data),
			   priv->tx_buf_data);

	p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
		ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);

	p54p_check_rx_ring(dev, &priv->rx_idx_data, 0, ring_control->rx_data,
		ARRAY_SIZE(ring_control->rx_data), priv->rx_buf_data);

	wmb();
	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
}

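/*
 * Interrupt handler: a read of all-ones means the device is gone and
 * the interrupt is not ours. Otherwise acknowledge the asserted
 * sources, schedule the tasklet on ring updates and complete the boot
 * wait when the firmware signals that initialization is done.
 */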
static irqreturn_t p54p_interrupt(int irq, void *dev_id)
{
	struct ieee80211_hw *dev = dev_id;
	struct p54p_priv *priv = dev->priv;
	__le32 reg;

	reg = P54P_READ(int_ident);
	if (unlikely(reg == cpu_to_le32(0xFFFFFFFF)))
		goto out;

	P54P_WRITE(int_ack, reg);

	reg &= P54P_READ(int_enable);

	if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE))
		tasklet_schedule(&priv->tasklet);
	else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
		complete(&priv->boot_comp);

out:
	return reg ? IRQ_HANDLED : IRQ_NONE;
}

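/*
 * Queue one frame on the TX data ring: map it for DMA, fill the next
 * free descriptor under the ring lock, advance the host index and ring
 * the doorbell register.
 */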
static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	unsigned long flags;
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	dma_addr_t mapping;
	u32 idx, i;
	__le32 device_addr;

	spin_lock_irqsave(&priv->lock, flags);
	idx = le32_to_cpu(ring_control->host_idx[1]);
	i = idx % ARRAY_SIZE(ring_control->tx_data);
	device_addr = ((struct p54_hdr *)skb->data)->req_id;

	mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(&priv->pdev->dev, mapping)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		p54_free_skb(dev, skb);
		dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
		return;
	}
	priv->tx_buf_data[i] = skb;

	desc = &ring_control->tx_data[i];
	desc->host_addr = cpu_to_le32(mapping);
	desc->device_addr = device_addr;
	desc->len = cpu_to_le16(skb->len);
	desc->flags = 0;

	wmb();
	ring_control->host_idx[1] = cpu_to_le32(idx + 1);
	spin_unlock_irqrestore(&priv->lock, flags);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
	P54P_READ(dev_int);
}

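/*
 * Shut the device down: mask interrupts, release the IRQ and tasklet,
 * reset the device and unmap/free every buffer still attached to the
 * four rings.
 */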
static void p54p_stop(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	unsigned int i;
	struct p54p_desc *desc;

	P54P_WRITE(int_enable, cpu_to_le32(0));
	P54P_READ(int_enable);
	udelay(10);

	free_irq(priv->pdev->irq, dev);

	tasklet_kill(&priv->tasklet);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));

	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
		desc = &ring_control->rx_data[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_buf_data[i]);
		priv->rx_buf_data[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) {
		desc = &ring_control->rx_mgmt[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_buf_mgmt[i]);
		priv->rx_buf_mgmt[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) {
		desc = &ring_control->tx_data[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 DMA_TO_DEVICE);

		p54_free_skb(dev, priv->tx_buf_data[i]);
		priv->tx_buf_data[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) {
		desc = &ring_control->tx_mgmt[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 DMA_TO_DEVICE);

		p54_free_skb(dev, priv->tx_buf_mgmt[i]);
		priv->tx_buf_mgmt[i] = NULL;
	}

	memset(ring_control, 0, sizeof(*ring_control));
}

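/*
 * Bring the device up: request the (shared) IRQ, upload the firmware,
 * prime both RX rings, point the device at the ring control block and
 * wait up to one second for the firmware's INIT interrupt.
 */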
static int p54p_open(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	int err;
	long time_left;

	init_completion(&priv->boot_comp);
	err = request_irq(priv->pdev->irq, p54p_interrupt,
			  IRQF_SHARED, "p54pci", dev);
	if (err) {
		dev_err(&priv->pdev->dev, "failed to register IRQ handler\n");
		return err;
	}

	memset(priv->ring_control, 0, sizeof(*priv->ring_control));
	err = p54p_upload_firmware(dev);
	if (err) {
		free_irq(priv->pdev->irq, dev);
		return err;
	}
	priv->rx_idx_data = priv->tx_idx_data = 0;
	priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;

	p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
		ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0);

	p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
		ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0);

	P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
	P54P_READ(ring_control_base);
	wmb();
	udelay(10);

	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
	P54P_READ(int_enable);
	wmb();
	udelay(10);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
	P54P_READ(dev_int);

	time_left = wait_for_completion_interruptible_timeout(
			&priv->boot_comp, HZ);
	if (time_left <= 0) {
		wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
		p54p_stop(dev);
		return time_left ? -ERESTARTSYS : -ETIMEDOUT;
	}

	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
	P54P_READ(int_enable);
	wmb();
	udelay(10);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
	P54P_READ(dev_int);
	wmb();
	udelay(10);

	return 0;
}

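/*
 * Completion callback for request_firmware_nowait(): boot the device
 * once to read the EEPROM, then register with mac80211. On failure the
 * driver is unbound, which tears everything down through p54p_remove().
 */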
static void p54p_firmware_step2(const struct firmware *fw,
				void *context)
{
	struct p54p_priv *priv = context;
	struct ieee80211_hw *dev = priv->common.hw;
	struct pci_dev *pdev = priv->pdev;
	int err;

	if (!fw) {
		dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
		err = -ENOENT;
		goto out;
	}

	priv->firmware = fw;

	err = p54p_open(dev);
	if (err)
		goto out;
	err = p54_read_eeprom(dev);
	p54p_stop(dev);
	if (err)
		goto out;

	err = p54_register_common(dev, &pdev->dev);
	if (err)
		goto out;

out:
	complete(&priv->fw_loaded);

	if (err) {
		struct device *parent = pdev->dev.parent;

		if (parent)
			device_lock(parent);

		/*
		 * This will indirectly result in a call to p54p_remove.
		 * Hence, we don't need to bother with freeing any
		 * allocated resources at all.
		 */
		device_release_driver(&pdev->dev);

		if (parent)
			device_unlock(parent);
	}

	pci_dev_put(pdev);
}

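/*
 * PCI probe: map BAR 0, set up 32-bit DMA, allocate the coherent ring
 * control block and kick off the asynchronous firmware request. The
 * pci_dev_get() here is balanced by pci_dev_put() in the firmware
 * callback or on the probe error path.
 */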
static int p54p_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	struct p54p_priv *priv;
	struct ieee80211_hw *dev;
	unsigned long mem_addr, mem_len;
	int err;

	pci_dev_get(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		goto err_put;
	}

	mem_addr = pci_resource_start(pdev, 0);
	mem_len = pci_resource_len(pdev, 0);
	if (mem_len < sizeof(struct p54p_csr)) {
		dev_err(&pdev->dev, "PCI memory resource too small\n");
		err = -ENODEV;
		goto err_disable_dev;
	}

	err = pci_request_regions(pdev, "p54pci");
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_dev;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (!err)
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto err_free_reg;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	pci_write_config_byte(pdev, 0x40, 0);
	pci_write_config_byte(pdev, 0x41, 0);

	dev = p54_init_common(sizeof(*priv));
	if (!dev) {
		dev_err(&pdev->dev, "ieee80211 alloc failed\n");
		err = -ENOMEM;
		goto err_free_reg;
	}

	priv = dev->priv;
	priv->pdev = pdev;

	init_completion(&priv->fw_loaded);
	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);

	priv->map = ioremap(mem_addr, mem_len);
	if (!priv->map) {
		dev_err(&pdev->dev, "Cannot map device memory\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	priv->ring_control = dma_alloc_coherent(&pdev->dev,
						sizeof(*priv->ring_control),
						&priv->ring_control_dma, GFP_KERNEL);
	if (!priv->ring_control) {
		dev_err(&pdev->dev, "Cannot allocate rings\n");
		err = -ENOMEM;
		goto err_iounmap;
	}
	priv->common.open = p54p_open;
	priv->common.stop = p54p_stop;
	priv->common.tx = p54p_tx;

	spin_lock_init(&priv->lock);
	tasklet_setup(&priv->tasklet, p54p_tasklet);

	err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
				      &priv->pdev->dev, GFP_KERNEL,
				      priv, p54p_firmware_step2);
	if (!err)
		return 0;

	dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control),
			  priv->ring_control, priv->ring_control_dma);

err_iounmap:
	iounmap(priv->map);

err_free_dev:
	p54_free_common(dev);

err_free_reg:
	pci_release_regions(pdev);
err_disable_dev:
	pci_disable_device(pdev);
err_put:
	pci_dev_put(pdev);
	return err;
}

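/*
 * Tear down everything set up by p54p_probe(); wait for the
 * asynchronous firmware load to finish before unregistering from
 * mac80211.
 */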
static void p54p_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	struct p54p_priv *priv;

	if (!dev)
		return;

	priv = dev->priv;
	wait_for_completion(&priv->fw_loaded);
	p54_unregister_common(dev);
	release_firmware(priv->firmware);
	dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control),
			  priv->ring_control, priv->ring_control_dma);
	iounmap(priv->map);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	p54_free_common(dev);
}

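/*
 * Suspend/resume only manage the PCI power state; no device state is
 * preserved, the firmware is uploaded again by p54p_open() on the next
 * start.
 */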
#ifdef CONFIG_PM_SLEEP
static int p54p_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	pci_disable_device(pdev);
	return 0;
}

static int p54p_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	int err;

	err = pci_reenable_device(pdev);
	if (err)
		return err;
	return pci_set_power_state(pdev, PCI_D0);
}

static SIMPLE_DEV_PM_OPS(p54pci_pm_ops, p54p_suspend, p54p_resume);

#define P54P_PM_OPS (&p54pci_pm_ops)
#else
#define P54P_PM_OPS (NULL)
#endif /* CONFIG_PM_SLEEP */

static struct pci_driver p54p_driver = {
	.name		= "p54pci",
	.id_table	= p54p_table,
	.probe		= p54p_probe,
	.remove		= p54p_remove,
	.driver.pm	= P54P_PM_OPS,
};

module_pci_driver(p54p_driver);