/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_main.c 1396 2004-12-28 04:10:27Z roland $
 */

#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include "mthca_dev.h"
#include "mthca_config_reg.h"
#include "mthca_cmd.h"
#include "mthca_profile.h"
#include "mthca_memfree.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_PCI_MSI

static int msi_x = 0;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

static int msi = 0;
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)
#define msi   (0)

#endif /* CONFIG_PCI_MSI */

static const char mthca_version[] __devinitdata =
	"ib_mthca: Mellanox InfiniBand HCA driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mthca_profile default_profile = {
	.num_qp		   = 1 << 16,
	.rdb_per_qp	   = 4,
	.num_cq		   = 1 << 16,
	.num_mcg	   = 1 << 13,
	.num_mpt	   = 1 << 17,
	.num_mtt	   = 1 << 20,
	.num_udav	   = 1 << 15,	/* Tavor only */
	.fmr_reserved_mtts = 1 << 18,	/* Tavor only */
	.uarc_size	   = 1 << 18,	/* Arbel only */
};

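/*
 * Raise the PCI-X Maximum Memory Read Byte Count and the PCI Express
 * Max Read Request Size to their largest encodings so the HCA can
 * issue large DMA reads, and note it if the capability expected for
 * the bus type is missing.
 */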
static int __devinit mthca_tune_pci(struct mthca_dev *mdev)
{
	int cap;
	u16 val;

	/* First try to max out Read Byte Count */
	cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
	if (cap) {
		if (pci_read_config_word(mdev->pdev, cap + PCI_X_CMD, &val)) {
			mthca_err(mdev, "Couldn't read PCI-X command register, "
				  "aborting.\n");
			return -ENODEV;
		}
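		/*
		 * Max Read Byte Count is bits 3:2 of the PCI-X command
		 * register; the value 3 selects 4096-byte reads.
		 */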
		val = (val & ~PCI_X_CMD_MAX_READ) | (3 << 2);
		if (pci_write_config_word(mdev->pdev, cap + PCI_X_CMD, val)) {
			mthca_err(mdev, "Couldn't write PCI-X command register, "
				  "aborting.\n");
			return -ENODEV;
		}
	} else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
		mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");

	cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP);
	if (cap) {
		if (pci_read_config_word(mdev->pdev, cap + PCI_EXP_DEVCTL, &val)) {
			mthca_err(mdev, "Couldn't read PCI Express device control "
				  "register, aborting.\n");
			return -ENODEV;
		}
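		/*
		 * Max Read Request Size is bits 14:12 of the PCIe device
		 * control register; the value 5 selects 4096-byte requests.
		 */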
		val = (val & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12);
		if (pci_write_config_word(mdev->pdev, cap + PCI_EXP_DEVCTL, val)) {
			mthca_err(mdev, "Couldn't write PCI Express device control "
				  "register, aborting.\n");
			return -ENODEV;
		}
	} else if (mdev->mthca_flags & MTHCA_FLAG_PCIE)
		mthca_info(mdev, "No PCI Express capability, "
			   "not setting Max Read Request Size.\n");

	return 0;
}

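/*
 * Query device limits from the firmware with QUERY_DEV_LIM, cache the
 * values the driver needs in mdev->limits, and translate the firmware
 * capability flags into IB device capability flags.
 */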
static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
{
	int err;
	u8 status;

	err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "QUERY_DEV_LIM returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}
	if (dev_lim->min_page_sz > PAGE_SIZE) {
		mthca_err(mdev, "HCA minimum page size of %d bigger than "
			  "kernel PAGE_SIZE of %ld, aborting.\n",
			  dev_lim->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_lim->num_ports > MTHCA_MAX_PORTS) {
		mthca_err(mdev, "HCA has %d ports, but we only support %d, "
			  "aborting.\n",
			  dev_lim->num_ports, MTHCA_MAX_PORTS);
		return -ENODEV;
	}

	mdev->limits.num_ports          = dev_lim->num_ports;
	mdev->limits.vl_cap             = dev_lim->max_vl;
	mdev->limits.mtu_cap            = dev_lim->max_mtu;
	mdev->limits.gid_table_len      = dev_lim->max_gids;
	mdev->limits.pkey_table_len     = dev_lim->max_pkeys;
	mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
	mdev->limits.max_sg             = dev_lim->max_sg;
	mdev->limits.reserved_qps       = dev_lim->reserved_qps;
	mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
	mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
	mdev->limits.reserved_cqs       = dev_lim->reserved_cqs;
	mdev->limits.reserved_eqs       = dev_lim->reserved_eqs;
	mdev->limits.reserved_mtts      = dev_lim->reserved_mtts;
	mdev->limits.reserved_mrws      = dev_lim->reserved_mrws;
	mdev->limits.reserved_uars      = dev_lim->reserved_uars;
	mdev->limits.reserved_pds       = dev_lim->reserved_pds;

	/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
	   May be doable since hardware supports it for SRQ.

	   IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver.

	   IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not
	   supported by driver. */
	mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI)
		mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI;

	if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG)
		mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;

	if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE)
		mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;

	if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
		mdev->mthca_flags |= MTHCA_FLAG_SRQ;

	return 0;
}

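/*
 * Initialize a Tavor-mode HCA (MT23108, or MT25208 in Tavor compat
 * mode): enable the device with SYS_EN, query firmware and DDR memory,
 * build a resource profile and run INIT_HCA, then read back the
 * adapter information (INTA pin, revision id).
 */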
static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
{
	u8 status;
	int err;
	struct mthca_dev_lim        dev_lim;
	struct mthca_profile        profile;
	struct mthca_init_hca_param init_hca;
	struct mthca_adapter        adapter;

	err = mthca_SYS_EN(mdev, &status);
	if (err) {
		mthca_err(mdev, "SYS_EN command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "SYS_EN returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_QUERY_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}
	err = mthca_QUERY_DDR(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_DDR command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "QUERY_DDR returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		goto err_disable;
	}

	profile = default_profile;
	profile.num_uar   = dev_lim.uar_size / PAGE_SIZE;
	profile.uarc_size = 0;

	err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (err < 0)
		goto err_disable;

	err = mthca_INIT_HCA(mdev, &init_hca, &status);
	if (err) {
		mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}

	err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
	if (err) {
		mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}
	if (status) {
		mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_close;
	}

	mdev->eq_table.inta_pin = adapter.inta_pin;
	mdev->rev_id            = adapter.revision_id;

	return 0;

err_close:
	mthca_CLOSE_HCA(mdev, 0, &status);

err_disable:
	mthca_SYS_DIS(mdev, &status);

	return err;
}

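/*
 * MemFree HCAs keep their firmware in host memory: allocate an ICM
 * area big enough for the firmware image, hand it to the device with
 * MAP_FA and start the firmware with RUN_FW.
 */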
static int __devinit mthca_load_fw(struct mthca_dev *mdev)
{
	u8 status;
	int err;

	/* FIXME: use HCA-attached memory for FW if present */

	mdev->fw.arbel.fw_icm =
		mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
				GFP_HIGHUSER | __GFP_NOWARN);
	if (!mdev->fw.arbel.fw_icm) {
		mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status);
	if (err) {
		mthca_err(mdev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}
	if (status) {
		mthca_err(mdev, "MAP_FA returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_free;
	}
	err = mthca_RUN_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}
	if (status) {
		mthca_err(mdev, "RUN_FW returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mthca_UNMAP_FA(mdev, &status);

err_free:
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
	return err;
}

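/*
 * Set up InfiniHost Context Memory (ICM) for a MemFree HCA: map the
 * auxiliary ICM area the firmware asks for, map the EQ context, then
 * allocate and map the context tables (MTT, MPT, QP, EQP, RDB, CQ and
 * MCG) at the base addresses chosen by mthca_make_profile().
 */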
static int __devinit mthca_init_icm(struct mthca_dev *mdev,
				    struct mthca_dev_lim *dev_lim,
				    struct mthca_init_hca_param *init_hca,
				    u64 icm_size)
{
	u64 aux_pages;
	u8 status;
	int err;

	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status);
	if (err) {
		mthca_err(mdev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "SET_ICM_SIZE returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		  (unsigned long long) icm_size >> 10,
		  (unsigned long long) aux_pages << 2);

	mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
						 GFP_HIGHUSER | __GFP_NOWARN);
	if (!mdev->fw.arbel.aux_icm) {
		mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status);
	if (err) {
		mthca_err(mdev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}
	if (status) {
		mthca_err(mdev, "MAP_ICM_AUX returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_free_aux;
	}

	err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
	if (err) {
		mthca_err(mdev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_aux;
	}

	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
							 MTHCA_MTT_SEG_SIZE,
							 mdev->limits.num_mtt_segs,
							 mdev->limits.reserved_mtts, 1);
	if (!mdev->mr_table.mtt_table) {
		mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_eq;
	}

	mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
							 dev_lim->mpt_entry_sz,
							 mdev->limits.num_mpts,
							 mdev->limits.reserved_mrws, 1);
	if (!mdev->mr_table.mpt_table) {
		mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mtt;
	}

	mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
							dev_lim->qpc_entry_sz,
							mdev->limits.num_qps,
							mdev->limits.reserved_qps, 0);
	if (!mdev->qp_table.qp_table) {
		mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mpt;
	}

	mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
							 dev_lim->eqpc_entry_sz,
							 mdev->limits.num_qps,
							 mdev->limits.reserved_qps, 0);
	if (!mdev->qp_table.eqp_table) {
		mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_qp;
	}

	mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
							 MTHCA_RDB_ENTRY_SIZE,
							 mdev->limits.num_qps <<
							 mdev->qp_table.rdb_shift,
							 0, 0);
	if (!mdev->qp_table.rdb_table) {
		mthca_err(mdev, "Failed to map RDB context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_eqp;
	}

	mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
						     dev_lim->cqc_entry_sz,
						     mdev->limits.num_cqs,
						     mdev->limits.reserved_cqs, 0);
	if (!mdev->cq_table.table) {
		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_rdb;
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
						      MTHCA_MGM_ENTRY_SIZE,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      0);
	if (!mdev->mcg_table.table) {
		mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_cq;
	}

	return 0;

err_unmap_cq:
	mthca_free_icm_table(mdev, mdev->cq_table.table);

err_unmap_rdb:
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);

err_unmap_eqp:
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);

err_unmap_qp:
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);

err_unmap_mpt:
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);

err_unmap_mtt:
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);

err_unmap_eq:
	mthca_unmap_eq_icm(mdev);

err_unmap_aux:
	mthca_UNMAP_ICM_AUX(mdev, &status);

err_free_aux:
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);

	return err;
}

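/*
 * Initialize a MemFree (Arbel native mode) HCA: check for HCA-attached
 * memory with ENABLE_LAM, start the firmware from host memory, size
 * and map ICM according to the profile, run INIT_HCA and read back the
 * adapter information.
 */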
static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
{
	struct mthca_dev_lim        dev_lim;
	struct mthca_profile        profile;
	struct mthca_init_hca_param init_hca;
	struct mthca_adapter        adapter;
	u64 icm_size;
	u8 status;
	int err;

	err = mthca_QUERY_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_ENABLE_LAM(mdev, &status);
	if (err) {
		mthca_err(mdev, "ENABLE_LAM command failed, aborting.\n");
		return err;
	}
	if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) {
		mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
		mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
	} else if (status) {
		mthca_err(mdev, "ENABLE_LAM returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_load_fw(mdev);
	if (err) {
		mthca_err(mdev, "Failed to start FW, aborting.\n");
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		goto err_stop_fw;
	}

	profile = default_profile;
	profile.num_uar  = dev_lim.uar_size / PAGE_SIZE;
	profile.num_udav = 0;

	icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if ((int) icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mthca_INIT_HCA(mdev, &init_hca, &status);
	if (err) {
		mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
		goto err_free_icm;
	}
	if (status) {
		mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_free_icm;
	}

	err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
	if (err) {
		mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_free_icm;
	}
	if (status) {
		mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_free_icm;
	}

	mdev->eq_table.inta_pin = adapter.inta_pin;
	mdev->rev_id            = adapter.revision_id;

	return 0;

err_free_icm:
	mthca_free_icm_table(mdev, mdev->cq_table.table);
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
	mthca_unmap_eq_icm(mdev);

	mthca_UNMAP_ICM_AUX(mdev, &status);
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);

err_stop_fw:
	mthca_UNMAP_FA(mdev, &status);
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);

err_disable:
	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
		mthca_DISABLE_LAM(mdev, &status);

	return err;
}

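/* Dispatch to the MemFree (Arbel) or Tavor initialization path. */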
static int __devinit mthca_init_hca(struct mthca_dev *mdev)
{
	if (mthca_is_memfree(mdev))
		return mthca_init_arbel(mdev);
	else
		return mthca_init_tavor(mdev);
}

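/*
 * Software setup once the HCA itself is running: initialize the UAR,
 * PD, MR, EQ, CQ, QP, AV and MCG tables, allocate the driver's own UAR
 * and PD, switch firmware commands from polling to event mode, and use
 * a NOP command to verify that interrupts are actually delivered.
 */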
static int __devinit mthca_setup_hca(struct mthca_dev *dev)
{
	int err;
	u8 status;

	MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);

	err = mthca_init_uar_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "user access region table, aborting.\n");
		return err;
	}

	err = mthca_uar_alloc(dev, &dev->driver_uar);
	if (err) {
		mthca_err(dev, "Failed to allocate driver access region, "
			  "aborting.\n");
		goto err_uar_table_free;
	}

	dev->kar = ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->kar) {
		mthca_err(dev, "Couldn't map kernel access region, "
			  "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mthca_init_pd_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mthca_init_mr_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mthca_pd_alloc(dev, &dev->driver_pd);
	if (err) {
		mthca_err(dev, "Failed to create driver PD, "
			  "aborting.\n");
		goto err_mr_table_free;
	}

	err = mthca_init_eq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "event queue table, aborting.\n");
		goto err_pd_free;
	}

	err = mthca_cmd_use_events(dev);
	if (err) {
		mthca_err(dev, "Failed to switch to event-driven "
			  "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mthca_NOP(dev, &status);
	if (err || status) {
		mthca_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting.\n",
			  dev->mthca_flags & MTHCA_FLAG_MSI_X ?
			  dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector :
			  dev->pdev->irq);
		if (dev->mthca_flags & (MTHCA_FLAG_MSI | MTHCA_FLAG_MSI_X))
			mthca_err(dev, "Try again with MSI/MSI-X disabled.\n");
		else
			mthca_err(dev, "BIOS or ACPI interrupt routing problem?\n");

		goto err_cmd_poll;
	}

	mthca_dbg(dev, "NOP command IRQ test passed\n");

	err = mthca_init_cq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mthca_init_qp_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "queue pair table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mthca_init_av_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "address vector table, aborting.\n");
		goto err_qp_table_free;
	}

	err = mthca_init_mcg_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "multicast group table, aborting.\n");
		goto err_av_table_free;
	}

	return 0;

err_av_table_free:
	mthca_cleanup_av_table(dev);

err_qp_table_free:
	mthca_cleanup_qp_table(dev);

err_cq_table_free:
	mthca_cleanup_cq_table(dev);

err_cmd_poll:
	mthca_cmd_use_polling(dev);

err_eq_table_free:
	mthca_cleanup_eq_table(dev);

err_pd_free:
	mthca_pd_free(dev, &dev->driver_pd);

err_mr_table_free:
	mthca_cleanup_mr_table(dev);

err_pd_table_free:
	mthca_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(dev->kar);

err_uar_free:
	mthca_uar_free(dev, &dev->driver_uar);

err_uar_table_free:
	mthca_cleanup_uar_table(dev);
	return err;
}

static int __devinit mthca_request_regions(struct pci_dev *pdev,
					   int ddr_hidden)
{
	int err;

	/*
	 * We can't just use pci_request_regions() because the MSI-X
	 * table is right in the middle of the first BAR.  If we
	 * grabbed all of the first BAR with pci_request_region(),
	 * then setting up MSI-X would fail, since the PCI core wants
	 * to do request_mem_region on the MSI-X vector table.
	 *
	 * So just request what we need right now, and request any
	 * other regions we need when setting up EQs.
	 */
	if (!request_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
				MTHCA_HCR_SIZE, DRV_NAME))
		return -EBUSY;

	err = pci_request_region(pdev, 2, DRV_NAME);
	if (err)
		goto err_bar2_failed;

	if (!ddr_hidden) {
		err = pci_request_region(pdev, 4, DRV_NAME);
		if (err)
			goto err_bar4_failed;
	}

	return 0;

err_bar4_failed:
	pci_release_region(pdev, 2);

err_bar2_failed:
	release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
			   MTHCA_HCR_SIZE);

	return err;
}

static void mthca_release_regions(struct pci_dev *pdev,
				  int ddr_hidden)
{
	if (!ddr_hidden)
		pci_release_region(pdev, 4);

	pci_release_region(pdev, 2);

	release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
			   MTHCA_HCR_SIZE);
}

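/*
 * Try to allocate one MSI-X vector for each of the three EQs
 * (completion, async events and command completions).
 */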
static int __devinit mthca_enable_msi_x(struct mthca_dev *mdev)
{
	struct msix_entry entries[3];
	int err;

	entries[0].entry = 0;
	entries[1].entry = 1;
	entries[2].entry = 2;

	err = pci_enable_msix(mdev->pdev, entries, ARRAY_SIZE(entries));
	if (err) {
		if (err > 0)
			mthca_info(mdev, "Only %d MSI-X vectors available, "
				   "not using MSI-X\n", err);
		return err;
	}

	mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
	mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
	mdev->eq_table.eq[MTHCA_EQ_CMD  ].msi_x_vector = entries[2].vector;

	return 0;
}

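/*
 * Undo mthca_init_hca(): close the HCA and, on MemFree devices, free
 * the ICM tables, auxiliary ICM and firmware area in reverse order of
 * their setup.
 */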
static void mthca_close_hca(struct mthca_dev *mdev)
{
	u8 status;

	mthca_CLOSE_HCA(mdev, 0, &status);

	if (mthca_is_memfree(mdev)) {
		mthca_free_icm_table(mdev, mdev->cq_table.table);
		mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
		mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
		mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
		mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
		mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
		mthca_unmap_eq_icm(mdev);

		mthca_UNMAP_ICM_AUX(mdev, &status);
		mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);

		mthca_UNMAP_FA(mdev, &status);
		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);

		if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
			mthca_DISABLE_LAM(mdev, &status);
	} else
		mthca_SYS_DIS(mdev, &status);
}

/* Types of supported HCA */
enum {
	TAVOR,			/* MT23108                        */
	ARBEL_COMPAT,		/* MT25208 in Tavor compat mode   */
	ARBEL_NATIVE,		/* MT25208 with extended features */
	SINAI			/* MT25204 */
};

#define MTHCA_FW_VER(major, minor, subminor) \
	(((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor))
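/* For example, MTHCA_FW_VER(3, 3, 2) evaluates to 0x0000000300030002. */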

static struct {
	u64 latest_fw;
	int is_memfree;
	int is_pcie;
} mthca_hca_table[] = {
	[TAVOR]        = { .latest_fw = MTHCA_FW_VER(3, 3, 2), .is_memfree = 0, .is_pcie = 0 },
	[ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 6, 2), .is_memfree = 0, .is_pcie = 1 },
	[ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 0, 1), .is_memfree = 1, .is_pcie = 1 },
	[SINAI]        = { .latest_fw = MTHCA_FW_VER(1, 0, 1), .is_memfree = 1, .is_pcie = 1 }
};

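/*
 * Probe one HCA: sanity-check and claim the PCI BARs, set the DMA
 * masks, reset the device, map the command (HCR) register, tune PCI
 * settings, initialize the HCA and its software tables, and finally
 * register the IB device and its MAD agents.
 */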
static int __devinit mthca_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *id)
{
	static int mthca_version_printed = 0;
	int ddr_hidden = 0;
	int err;
	struct mthca_dev *mdev;

	if (!mthca_version_printed) {
		printk(KERN_INFO "%s", mthca_version);
		++mthca_version_printed;
	}

	printk(KERN_INFO PFX "Initializing %s (%s)\n",
	       pci_pretty_name(pdev), pci_name(pdev));

	if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
		printk(KERN_ERR PFX "%s (%s) has invalid driver data %lx\n",
		       pci_pretty_name(pdev), pci_name(pdev), id->driver_data);
		return -ENODEV;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	/*
	 * Check for BARs.  We expect 0: 1MB, 2: 8MB, 4: DDR (may not
	 * be present)
	 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 2) != 1 << 23) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM))
		ddr_hidden = 1;

	err = mthca_request_regions(pdev, ddr_hidden);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, "
			"aborting.\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_free_res;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_free_res;
		}
	}

	mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev);
	if (!mdev) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	mdev->pdev = pdev;

	if (ddr_hidden)
		mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;
	if (mthca_hca_table[id->driver_data].is_memfree)
		mdev->mthca_flags |= MTHCA_FLAG_MEMFREE;
	if (mthca_hca_table[id->driver_data].is_pcie)
		mdev->mthca_flags |= MTHCA_FLAG_PCIE;

	/*
	 * Now reset the HCA before we touch the PCI capabilities or
	 * attempt a firmware command, since a boot ROM may have left
	 * the HCA in an undefined state.
	 */
	err = mthca_reset(mdev);
	if (err) {
		mthca_err(mdev, "Failed to reset HCA, aborting.\n");
		goto err_free_dev;
	}

	if (msi_x && !mthca_enable_msi_x(mdev))
		mdev->mthca_flags |= MTHCA_FLAG_MSI_X;
	if (msi && !(mdev->mthca_flags & MTHCA_FLAG_MSI_X) &&
	    !pci_enable_msi(pdev))
		mdev->mthca_flags |= MTHCA_FLAG_MSI;

	sema_init(&mdev->cmd.hcr_sem, 1);
	sema_init(&mdev->cmd.poll_sem, 1);
	mdev->cmd.use_events = 0;

	mdev->hcr = ioremap(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE, MTHCA_HCR_SIZE);
	if (!mdev->hcr) {
		mthca_err(mdev, "Couldn't map command register, "
			  "aborting.\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	err = mthca_tune_pci(mdev);
	if (err)
		goto err_iounmap;

	err = mthca_init_hca(mdev);
	if (err)
		goto err_iounmap;

	if (mdev->fw_ver < mthca_hca_table[id->driver_data].latest_fw) {
		mthca_warn(mdev, "HCA FW version %x.%x.%x is old (%x.%x.%x is current).\n",
			   (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
			   (int) (mdev->fw_ver & 0xffff),
			   (int) (mthca_hca_table[id->driver_data].latest_fw >> 32),
			   (int) (mthca_hca_table[id->driver_data].latest_fw >> 16) & 0xffff,
			   (int) (mthca_hca_table[id->driver_data].latest_fw & 0xffff));
		mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n");
	}

	err = mthca_setup_hca(mdev);
	if (err)
		goto err_close;

	err = mthca_register_device(mdev);
	if (err)
		goto err_cleanup;

	err = mthca_create_agents(mdev);
	if (err)
		goto err_unregister;

	pci_set_drvdata(pdev, mdev);

	return 0;

err_unregister:
	mthca_unregister_device(mdev);

err_cleanup:
	mthca_cleanup_mcg_table(mdev);
	mthca_cleanup_av_table(mdev);
	mthca_cleanup_qp_table(mdev);
	mthca_cleanup_cq_table(mdev);
	mthca_cmd_use_polling(mdev);
	mthca_cleanup_eq_table(mdev);

	mthca_pd_free(mdev, &mdev->driver_pd);

	mthca_cleanup_mr_table(mdev);
	mthca_cleanup_pd_table(mdev);
	mthca_cleanup_uar_table(mdev);

err_close:
	mthca_close_hca(mdev);

err_iounmap:
	iounmap(mdev->hcr);

err_free_dev:
	if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
		pci_disable_msix(pdev);
	if (mdev->mthca_flags & MTHCA_FLAG_MSI)
		pci_disable_msi(pdev);

	ib_dealloc_device(&mdev->ib_dev);

err_free_res:
	mthca_release_regions(pdev, ddr_hidden);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

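/*
 * Tear down one HCA in the reverse order of mthca_init_one(): close
 * the IB ports, clean up the software tables, close the HCA and
 * release the PCI resources.
 */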
static void __devexit mthca_remove_one(struct pci_dev *pdev)
{
	struct mthca_dev *mdev = pci_get_drvdata(pdev);
	u8 status;
	int p;

	if (mdev) {
		mthca_free_agents(mdev);
		mthca_unregister_device(mdev);

		for (p = 1; p <= mdev->limits.num_ports; ++p)
			mthca_CLOSE_IB(mdev, p, &status);

		mthca_cleanup_mcg_table(mdev);
		mthca_cleanup_av_table(mdev);
		mthca_cleanup_qp_table(mdev);
		mthca_cleanup_cq_table(mdev);
		mthca_cmd_use_polling(mdev);
		mthca_cleanup_eq_table(mdev);

		mthca_pd_free(mdev, &mdev->driver_pd);

		mthca_cleanup_mr_table(mdev);
		mthca_cleanup_pd_table(mdev);

		iounmap(mdev->kar);
		mthca_uar_free(mdev, &mdev->driver_uar);
		mthca_cleanup_uar_table(mdev);

		mthca_close_hca(mdev);

		iounmap(mdev->hcr);

		if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
			pci_disable_msix(pdev);
		if (mdev->mthca_flags & MTHCA_FLAG_MSI)
			pci_disable_msi(pdev);

		ib_dealloc_device(&mdev->ib_dev);
		mthca_release_regions(pdev, mdev->mthca_flags &
				      MTHCA_FLAG_DDR_HIDDEN);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_device_id mthca_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR),
	  .driver_data = TAVOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_TAVOR),
	  .driver_data = TAVOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
	  .driver_data = ARBEL_COMPAT },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
	  .driver_data = ARBEL_COMPAT },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL),
	  .driver_data = ARBEL_NATIVE },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL),
	  .driver_data = ARBEL_NATIVE },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
	  .driver_data = SINAI },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mthca_pci_table);

static struct pci_driver mthca_driver = {
	.name		= "ib_mthca",
	.id_table	= mthca_pci_table,
	.probe		= mthca_init_one,
	.remove		= __devexit_p(mthca_remove_one)
};

static int __init mthca_init(void)
{
	int ret;

	ret = pci_register_driver(&mthca_driver);
	return ret < 0 ? ret : 0;
}

static void __exit mthca_cleanup(void)
{
	pci_unregister_driver(&mthca_driver);
}

module_init(mthca_init);
module_exit(mthca_cleanup);