// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;

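/*
 * Issue a command to the ISM device. The caller-provided buffer holds the
 * request and is overwritten in place with the response. The payload is
 * written first, then the header at offset 0, so the device sees a complete
 * request when the command is started. resp->ret is preset to ISM_ERROR so
 * a command that produces no response is reported as failed.
 */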
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	spin_lock(&ism->cmd_lock);
	__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
	__ism_write_cmd(ism, req, 0, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	__ism_read_cmd(ism, resp, 0, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
	spin_unlock(&ism->cmd_lock);
	return resp->ret;
}

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}

static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}

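/*
 * Allocate one page for the SBA and register its DMA address with the
 * device (ISM_REG_SBA). The device posts per-DMB interrupt state and the
 * event bit in this area; see ism_handle_irq().
 */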
static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}

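/*
 * Allocate one page for the interrupt event queue (IEQ) and register it
 * with the device (ISM_REG_IEQ). The device appends ism_event entries to
 * this ring; ism->ieq_idx tracks the last entry processed and starts at -1.
 */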
static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}

static int unregister_sba(struct ism_dev *ism)
{
	int ret;

	if (!ism->sba)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	int ret;

	if (!ism->ieq)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}

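/* Read the local GID from the device and store it in dibs->gid. */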
static int ism_read_local_gid(struct dibs_dev *dibs)
{
	struct ism_dev *ism = dibs->drv_priv;
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	memset(&dibs->gid, 0, sizeof(dibs->gid));
	memcpy(&dibs->gid, &cmd.response.gid, sizeof(cmd.response.gid));
out:
	return ret;
}

static int ism_query_rgid(struct dibs_dev *dibs, const uuid_t *rgid,
			  u32 vid_valid, u32 vid)
{
	struct ism_dev *ism = dibs->drv_priv;
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	memcpy(&cmd.request.rgid, rgid, sizeof(cmd.request.rgid));
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}

static int ism_max_dmbs(void)
{
	return ISM_NR_DMBS;
}

static void ism_free_dmb(struct ism_dev *ism, struct dibs_dmb *dmb)
{
	clear_bit(dmb->idx, ism->sba_bitmap);
	dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
		       DMA_FROM_DEVICE);
	folio_put(virt_to_folio(dmb->cpu_addr));
}

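/*
 * Allocate backing memory for a DMB and map it for DMA. If the caller did
 * not request a specific index, pick the first free bit in the SBA bitmap
 * (indices below ISM_DMB_BIT_OFFSET are reserved). The buffer comes from
 * the page allocator via folio_alloc(), so error paths must release it
 * with folio_put(), not kfree().
 */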
static int ism_alloc_dmb(struct ism_dev *ism, struct dibs_dmb *dmb)
{
	struct folio *folio;
	unsigned long bit;
	int rc;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOSPC;

		dmb->idx = bit;
	}
	if (dmb->idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->idx, ism->sba_bitmap))
		return -EINVAL;

	folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
			    __GFP_NORETRY, get_order(dmb->dmb_len));

	if (!folio) {
		rc = -ENOMEM;
		goto out_bit;
	}

	dmb->cpu_addr = folio_address(folio);
	dmb->dma_addr = dma_map_page(&ism->pdev->dev,
				     virt_to_page(dmb->cpu_addr), 0,
				     dmb->dmb_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
		rc = -ENOMEM;
		goto out_free;
	}

	return 0;

out_free:
	/* the buffer came from folio_alloc(), not kmalloc(): drop the folio */
	folio_put(folio);
out_bit:
	clear_bit(dmb->idx, ism->sba_bitmap);
	return rc;
}

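/*
 * Register a DMB with the device (ISM_REG_DMB) and record which dibs
 * client owns it, so ism_handle_irq() can route interrupts for this
 * buffer to that client.
 */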
static int ism_register_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb,
			    struct dibs_client *client)
{
	struct ism_dev *ism = dibs->drv_priv;
	union ism_reg_dmb cmd;
	unsigned long flags;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	memcpy(&cmd.request.rgid, &dmb->rgid, sizeof(u64));

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
	spin_lock_irqsave(&dibs->lock, flags);
	dibs->dmb_clientid_arr[dmb->idx - ISM_DMB_BIT_OFFSET] = client->id;
	spin_unlock_irqrestore(&dibs->lock, flags);
out:
	return ret;
}

static int ism_unregister_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
{
	struct ism_dev *ism = dibs->drv_priv;
	union ism_unreg_dmb cmd;
	unsigned long flags;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	spin_lock_irqsave(&dibs->lock, flags);
	dibs->dmb_clientid_arr[dmb->idx - ISM_DMB_BIT_OFFSET] = NO_DIBS_CLIENT;
	spin_unlock_irqrestore(&dibs->lock, flags);

	ret = ism_cmd(ism, &cmd);
	if (ret && ret != ISM_ERROR)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}

static int ism_add_vlan_id(struct dibs_dev *dibs, u64 vlan_id)
{
	struct ism_dev *ism = dibs->drv_priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct dibs_dev *dibs, u64 vlan_id)
{
	struct ism_dev *ism = dibs->drv_priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_signal_ieq(struct dibs_dev *dibs, const uuid_t *rgid,
			  u32 trigger_irq, u32 event_code, u64 info)
{
	struct ism_dev *ism = dibs->drv_priv;
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	memcpy(&cmd.request.rgid, rgid, sizeof(cmd.request.rgid));
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}

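/* Number of bytes that can be transferred from 'start' without crossing a
 * 'boundary'-aligned address, capped at 'len'. 'boundary' must be a power
 * of two.
 */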
static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}

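/*
 * Write 'size' bytes into a remote DMB, splitting the transfer at page
 * boundaries. The signal flag 'sf' is only applied to the final chunk, so
 * the receiver is signalled once, after all data has been written.
 */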
static int ism_move(struct dibs_dev *dibs, u64 dmb_tok, unsigned int idx,
		    bool sf, unsigned int offset, void *data,
		    unsigned int size)
{
	struct ism_dev *ism = dibs->drv_priv;
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}

static u16 ism_get_chid(struct dibs_dev *dibs)
{
	struct ism_dev *ism = dibs->drv_priv;

	if (!ism || !ism->pdev)
		return 0;

	return to_zpci(ism->pdev)->pchid;
}

static int ism_match_event_type(u32 s390_event_type)
{
	switch (s390_event_type) {
	case ISM_EVENT_BUF:
		return DIBS_BUF_EVENT;
	case ISM_EVENT_DEV:
		return DIBS_DEV_EVENT;
	case ISM_EVENT_SWR:
		return DIBS_SW_EVENT;
	default:
		return DIBS_OTHER_TYPE;
	}
}

static int ism_match_event_subtype(u32 s390_event_subtype)
{
	switch (s390_event_subtype) {
	case ISM_BUF_DMB_UNREGISTERED:
		return DIBS_BUF_UNREGISTERED;
	case ISM_DEV_GID_DISABLED:
		return DIBS_DEV_DISABLED;
	case ISM_DEV_GID_ERR_STATE:
		return DIBS_DEV_ERR_STATE;
	default:
		return DIBS_OTHER_SUBTYPE;
	}
}

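/*
 * Walk the IEQ from the last processed entry up to the device's current
 * write index, translate each raw ISM event into a struct dibs_event and
 * fan it out to all subscribed dibs clients.
 */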
static void ism_handle_event(struct ism_dev *ism)
{
	struct dibs_dev *dibs = ism->dibs;
	struct dibs_event event;
	struct ism_event *entry;
	struct dibs_client *clt;
	int i;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++ism->ieq_idx == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		__memset(&event, 0, sizeof(event));
		event.type = ism_match_event_type(entry->type);
		if (event.type == DIBS_SW_EVENT)
			event.subtype = entry->code;
		else
			event.subtype = ism_match_event_subtype(entry->code);
		event.time = entry->time;
		event.data = entry->info;
		switch (event.type) {
		case DIBS_BUF_EVENT:
			event.buffer_tok = entry->tok;
			break;
		case DIBS_DEV_EVENT:
		case DIBS_SW_EVENT:
			memcpy(&event.gid, &entry->tok, sizeof(u64));
		}
		for (i = 0; i < MAX_DIBS_CLIENTS; ++i) {
			clt = dibs->subs[i];
			if (clt)
				clt->ops->handle_event(dibs, &event);
		}
	}
}

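/*
 * MSI handler. Clears the summary bit, then scans the SBA DMB bit vector:
 * for each bit set by the device, latch and reset the per-DMB event mask
 * and forward it to the owning client's handle_irq() callback. Finally,
 * if the event bit is set, process the IEQ.
 */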
static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	struct dibs_dev *dibs;
	unsigned long *bv;
	u16 dmbemask;
	u8 client_id;

	dibs = ism->dibs;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&dibs->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		client_id = dibs->dmb_clientid_arr[bit];
		if (unlikely(client_id == NO_DIBS_CLIENT ||
			     !dibs->subs[client_id]))
			continue;
		dibs->subs[client_id]->ops->handle_irq(dibs,
						       bit + ISM_DMB_BIT_OFFSET,
						       dmbemask);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&dibs->lock);
	return IRQ_HANDLED;
}

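/* dibs_dev_ops implementation: the entry points the dibs layer uses to
 * drive this ISM device.
 */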
static const struct dibs_dev_ops ism_ops = {
	.get_fabric_id = ism_get_chid,
	.query_remote_gid = ism_query_rgid,
	.max_dmbs = ism_max_dmbs,
	.register_dmb = ism_register_dmb,
	.unregister_dmb = ism_unregister_dmb,
	.move_data = ism_move,
	.add_vlan_id = ism_add_vlan_id,
	.del_vlan_id = ism_del_vlan_id,
	.signal_event = ism_signal_ieq,
};

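/*
 * Bring up interrupts and notification areas: allocate a single MSI
 * vector, attach the handler, then register the SBA and IEQ with the
 * device. Teardown on error runs in reverse order.
 */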
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_vectors;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	query_info(ism);
	return 0;

unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}

static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;

	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	pci_free_irq_vectors(pdev);
}

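/*
 * PCI probe: enable the PCI function, set DMA segment limits to 1 MB
 * (ism_alloc_dmb() rejects larger DMBs), allocate the dibs device,
 * initialize the ISM device and publish it to dibs clients via
 * dibs_dev_add().
 */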
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct dibs_dev *dibs;
	struct zpci_dev *zdev;
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->cmd_lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err_dev;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_resource;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	dibs = dibs_dev_alloc();
	if (!dibs) {
		ret = -ENOMEM;
		goto err_resource;
	}
	/* set this up before we enable interrupts */
	ism->dibs = dibs;
	dibs->drv_priv = ism;
	dibs->ops = &ism_ops;

	/* Enable the ISM device; interrupts and events are ignored until
	 * dibs_dev_add() makes the device known to clients.
	 */
	ret = ism_dev_init(ism);
	if (ret)
		goto err_dibs;

	/* after ism_dev_init() ISM commands can be issued, e.g. to read the GID */
	ret = ism_read_local_gid(dibs);
	if (ret)
		goto err_ism;

	dibs->dev.parent = &pdev->dev;

	zdev = to_zpci(pdev);
	dev_set_name(&dibs->dev, "ism%x", zdev->uid ? zdev->uid : zdev->fid);

	ret = dibs_dev_add(dibs);
	if (ret)
		goto err_ism;

	return 0;

err_ism:
	ism_dev_exit(ism);
err_dibs:
	/* pairs with dibs_dev_alloc() */
	put_device(&dibs->dev);
err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err_dev:
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);

	return ret;
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
	struct dibs_dev *dibs = ism->dibs;

	dibs_dev_del(dibs);
	ism_dev_exit(ism);
	/* pairs with dibs_dev_alloc() */
	put_device(&dibs->dev);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);
}

static struct pci_driver ism_driver = {
	.name	  = DRV_NAME,
	.id_table = ism_device_table,
	.probe	  = ism_probe,
	.remove	  = ism_remove,
};

static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}

static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);