/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_config_reg.h"

enum {
	MTHCA_NUM_ASYNC_EQE = 0x80,
	MTHCA_NUM_CMD_EQE   = 0x80,
	MTHCA_NUM_SPARE_EQE = 0x80,
	MTHCA_EQ_ENTRY_SIZE = 0x20
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_eq_context {
	__be32 flags;
	__be64 start;
	__be32 logsize_usrpage;
	__be32 tavor_pd;	/* reserved for Arbel */
	u8     reserved1[3];
	u8     intr;
	__be32 arbel_pd;	/* lost_count for Tavor */
	__be32 lkey;
	u32    reserved2[2];
	__be32 consumer_index;
	__be32 producer_index;
	u32    reserved3[4];
} __attribute__((packed));
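
/*
 * With the packed attribute this context is exactly 64 bytes
 * (4 + 8 + 4 + 4 + 3 + 1 + 4 + 4 + 8 + 4 + 4 + 16).  Without it the
 * compiler would insert 4 bytes of padding before the 8-byte 'start'
 * field to align it, shifting every later field off the offsets the
 * hardware expects.
 */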

#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)

enum {
	MTHCA_EVENT_TYPE_COMP               = 0x00,
	MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,
	MTHCA_EVENT_TYPE_COMM_EST           = 0x02,
	MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,
	MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
	MTHCA_EVENT_TYPE_SRQ_LIMIT          = 0x14,
	MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,
	MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
	MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
	MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,
	MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,
	MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,
	MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,
	MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
	MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,
	MTHCA_EVENT_TYPE_CMD                = 0x0a
};

#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
				(1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
				(1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
				(1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
				(1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
				(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
				(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)

#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)
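/*
 * On Tavor these commands go out through the 64-bit EQ doorbell in
 * the kernel UAR page: the command OR'd with the EQ number forms the
 * high word and the payload (the new consumer index for
 * MTHCA_EQ_DB_SET_CI, the CQ number for MTHCA_EQ_DB_DISARM_CQ) forms
 * the low word, as the mthca_write64() calls below show.
 */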

struct mthca_eqe {
	u8 reserved1;
	u8 type;
	u8 reserved2;
	u8 subtype;
	union {
		u32 raw[6];
		struct {
			__be32 cqn;
		} __attribute__((packed)) comp;
		struct {
			u16    reserved1;
			__be16 token;
			u32    reserved2;
			u8     reserved3[3];
			u8     status;
			__be64 out_param;
		} __attribute__((packed)) cmd;
		struct {
			__be32 qpn;
		} __attribute__((packed)) qp;
		struct {
			__be32 srqn;
		} __attribute__((packed)) srq;
		struct {
			__be32 cqn;
			u32    reserved1;
			u8     reserved2[3];
			u8     syndrome;
		} __attribute__((packed)) cq_err;
		struct {
			u32    reserved1[2];
			__be32 port;
		} __attribute__((packed)) port_change;
	} event;
	u8 reserved3[3];
	u8 owner;
} __attribute__((packed));

#define  MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
#define  MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)
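/*
 * The owner byte is the last byte of each EQE: 4 bytes of header, a
 * 24-byte event union and 4 trailing bytes add up to the 32-byte
 * MTHCA_EQ_ENTRY_SIZE.  Bit 7 of that byte records whether software
 * or the HCA currently owns the entry.
 */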

static inline u64 async_mask(struct mthca_dev *dev)
{
	return dev->mthca_flags & MTHCA_FLAG_SRQ ?
		MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
		MTHCA_ASYNC_EVENT_MASK;
}

static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	/*
	 * This barrier makes sure that all updates to ownership bits
	 * done by set_eqe_hw() hit memory before the consumer index
	 * is updated.  set_eq_ci() allows the HCA to possibly write
	 * more EQ entries, and we want to avoid the exceedingly
	 * unlikely possibility of the HCA writing an entry and then
	 * having set_eqe_hw() overwrite the owner field.
	 */
	wmb();
	mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	/* See comment in tavor_set_eq_ci() above. */
	wmb();
	__raw_writel((__force u32) cpu_to_be32(ci),
		     dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	if (mthca_is_memfree(dev))
		arbel_set_eq_ci(dev, eq, ci);
	else
		tavor_set_eq_ci(dev, eq, ci);
}

static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
	mthca_write64(MTHCA_EQ_DB_REQ_NOT | eqn, 0,
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
{
	writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
}

static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
	if (!mthca_is_memfree(dev)) {
		mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn,
			      dev->kar + MTHCA_EQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}
}

static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	eqe = get_eqe(eq, eq->cons_index);
	return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}

static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
	eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;
}
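
/*
 * Each EQE thus cycles between the two owners: the HCA fills an
 * entry and leaves the owner bit clear, next_eqe_sw() returns
 * entries whose bit is clear for the driver to process, and
 * set_eqe_hw() hands each processed entry back to the HCA (see the
 * loop in mthca_eq_int() below).
 */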

static void port_change(struct mthca_dev *dev, int port, int active)
{
	struct ib_event record;

	mthca_dbg(dev, "Port change to %s for port %d\n",
		  active ? "active" : "down", port);

	record.device = &dev->ib_dev;
	record.event  = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	record.element.port_num = port;

	ib_dispatch_event(&record);
}

static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	int disarm_cqn;
	int eqes_found = 0;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MTHCA_EVENT_TYPE_COMP:
			disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			disarm_cq(dev, eq->eqn, disarm_cqn);
			mthca_cq_completion(dev, disarm_cqn);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG);
			break;

		case MTHCA_EVENT_TYPE_COMM_EST:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_COMM_EST);
			break;

		case MTHCA_EVENT_TYPE_SQ_DRAINED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_SQ_DRAINED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_LAST_WQE_REACHED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_LIMIT:
			mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
					IB_EVENT_SRQ_LIMIT_REACHED);
			break;

		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_FATAL);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_REQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_ACCESS_ERR);
			break;

		case MTHCA_EVENT_TYPE_CMD:
			mthca_cmd_event(dev,
					be16_to_cpu(eqe->event.cmd.token),
					eqe->event.cmd.status,
					be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MTHCA_EVENT_TYPE_PORT_CHANGE:
			port_change(dev,
				    (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
				    eqe->subtype == 0x4);
			break;

		case MTHCA_EVENT_TYPE_CQ_ERROR:
			mthca_warn(dev, "CQ %s on CQN %06x\n",
				   eqe->event.cq_err.syndrome == 1 ?
				   "overrun" : "access violation",
				   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				       IB_EVENT_CQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
			mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_ECC_DETECT:
		default:
			mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
				   eqe->type, eqe->subtype, eq->eqn);
			break;
		}

		set_eqe_hw(eqe);
		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MTHCA_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
			/*
			 * Conditional on hca_type is OK here because
			 * this is a rare case, not the fast path.
			 */
			set_eq_ci(dev, eq, eq->cons_index);
			set_ci = 0;
		}
	}

	/*
	 * Rely on caller to set consumer index so that we don't have
	 * to test hca_type in our interrupt handling fast path.
	 */
	return eqes_found;
}

static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr)
{
	struct mthca_dev *dev = dev_ptr;
	u32 ecr;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
	if (!ecr)
		return IRQ_NONE;

	writel(ecr, dev->eq_regs.tavor.ecr_base +
	       MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (ecr & dev->eq_table.eq[i].eqn_mask) {
			if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
				tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
						dev->eq_table.eq[i].cons_index);
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
		}

	return IRQ_HANDLED;
}

static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mthca_eq  *eq  = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	tavor_set_eq_ci(dev, eq, eq->cons_index);
	tavor_eq_req_not(dev, eq->eqn);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr)
{
	struct mthca_dev *dev = dev_ptr;
	int work = 0;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
			work = 1;
			arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
					dev->eq_table.eq[i].cons_index);
		}

	arbel_eq_req_not(dev, dev->eq_table.arm_mask);

	return IRQ_RETVAL(work);
}

static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mthca_eq  *eq  = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	arbel_set_eq_ci(dev, eq, eq->cons_index);
	arbel_eq_req_not(dev, eq->eqn_mask);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

static int mthca_create_eq(struct mthca_dev *dev,
			   int nent,
			   u8 intr,
			   struct mthca_eq *eq)
{
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	struct mthca_mailbox *mailbox;
	struct mthca_eq_context *eq_context;
	int err = -ENOMEM;
	int i;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		dma_unmap_addr_set(&eq->page_list[i], mapping, t);

		clear_page(eq->page_list[i].buf);
	}

	for (i = 0; i < eq->nent; ++i)
		set_eqe_hw(get_eqe(eq, i));

	eq->eqn = mthca_alloc(&dev->eq_table.alloc);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
				  dma_list, PAGE_SHIFT, npages,
				  0, npages * PAGE_SIZE,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &eq->mr);
	if (err)
		goto err_out_free_eq;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags           = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
						  MTHCA_EQ_OWNER_HW    |
						  MTHCA_EQ_STATE_ARMED |
						  MTHCA_EQ_FLAG_TR);
	if (mthca_is_memfree(dev))
		eq_context->flags  |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);

	eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
	if (mthca_is_memfree(dev)) {
		eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
	} else {
		eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
		eq_context->tavor_pd         = cpu_to_be32(dev->driver_pd.pd_num);
	}
	eq_context->intr            = intr;
	eq_context->lkey            = cpu_to_be32(eq->mr.ibmr.lkey);

	err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mthca_warn(dev, "SW2HW_EQ returned %d\n", err);
		goto err_out_free_mr;
	}

	kfree(dma_list);
	mthca_free_mailbox(dev, mailbox);
553*33ec1ccbSHans Petter Selasky 
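	/*
	 * eqn_mask is kept byte-swapped so that a plain little-endian
	 * writel() of it (see arbel_eq_req_not()) produces the
	 * big-endian bit mask the HCA expects; the same swapped form
	 * is what mthca_tavor_interrupt() tests against the ECR word
	 * it reads, and all masks are OR'd into dev->eq_table.arm_mask
	 * so every EQ can be re-armed with a single write.
	 */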
	eq->eqn_mask   = swab32(1 << eq->eqn);
	eq->cons_index = 0;

	dev->eq_table.arm_mask |= eq->eqn_mask;

	mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
		  eq->eqn, eq->nent);

	return err;

 err_out_free_mr:
	mthca_free_mr(dev, &eq->mr);

 err_out_free_eq:
	mthca_free(&dev->eq_table.alloc, eq->eqn);

 err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  dma_unmap_addr(&eq->page_list[i],
							 mapping));

	mthca_free_mailbox(dev, mailbox);

 err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

 err_out:
	return err;
}

static void mthca_free_eq(struct mthca_dev *dev,
			  struct mthca_eq *eq)
{
	struct mthca_mailbox *mailbox;
	int err;
	int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
		PAGE_SIZE;
	int i;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return;

	err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mthca_warn(dev, "HW2SW_EQ returned %d\n", err);

	dev->eq_table.arm_mask &= ~eq->eqn_mask;

	if (0) {
		mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	mthca_free_mr(dev, &eq->mr);
	for (i = 0; i < npages; ++i)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    eq->page_list[i].buf,
				    dma_unmap_addr(&eq->page_list[i], mapping));

	kfree(eq->page_list);
	mthca_free_mailbox(dev, mailbox);
}

static void mthca_free_irqs(struct mthca_dev *dev)
{
	int i;

	if (dev->eq_table.have_irq)
		free_irq(dev->pdev->irq, dev);
	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (dev->eq_table.eq[i].have_irq) {
			free_irq(dev->eq_table.eq[i].msi_x_vector,
				 dev->eq_table.eq + i);
			dev->eq_table.eq[i].have_irq = 0;
		}
}

static int mthca_map_reg(struct mthca_dev *dev,
			 unsigned long offset, unsigned long size,
			 void __iomem **map)
{
	phys_addr_t base = pci_resource_start(dev->pdev, 0);

	*map = ioremap(base + offset, size);
	if (!*map)
		return -ENOMEM;

	return 0;
}

static int mthca_map_eq_regs(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev)) {
		/*
		 * We assume that the EQ arm and EQ set CI registers
		 * fall within the first BAR.  We can't trust the
		 * values firmware gives us, since those addresses are
		 * valid on the HCA's side of the PCI bus but not
		 * necessarily the host side.
		 */
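		/*
		 * pci_resource_len() is a power of two, so ANDing a
		 * firmware-supplied bus address with (len - 1) below
		 * reduces it to an offset within BAR 0; mthca_map_reg()
		 * then adds that offset back to the host-visible
		 * pci_resource_start() address.
		 */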
		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				  dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
				  &dev->clr_base)) {
			mthca_err(dev, "Couldn't map interrupt clear register, "
				  "aborting.\n");
			return -ENOMEM;
		}

		/*
		 * Add 4 because we limit ourselves to EQs 0 ... 31,
		 * so we only need the low word of the register.
		 */
		if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
					dev->fw.arbel.eq_arm_base) + 4, 4,
				  &dev->eq_regs.arbel.eq_arm)) {
			mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
			iounmap(dev->clr_base);
			return -ENOMEM;
		}

		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				  dev->fw.arbel.eq_set_ci_base,
				  MTHCA_EQ_SET_CI_SIZE,
				  &dev->eq_regs.arbel.eq_set_ci_base)) {
			mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
			iounmap(dev->eq_regs.arbel.eq_arm);
			iounmap(dev->clr_base);
			return -ENOMEM;
		}
	} else {
		if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
				  &dev->clr_base)) {
			mthca_err(dev, "Couldn't map interrupt clear register, "
				  "aborting.\n");
			return -ENOMEM;
		}

		if (mthca_map_reg(dev, MTHCA_ECR_BASE,
				  MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
				  &dev->eq_regs.tavor.ecr_base)) {
			mthca_err(dev, "Couldn't map ecr register, "
				  "aborting.\n");
			iounmap(dev->clr_base);
			return -ENOMEM;
		}
	}

	return 0;
}

static void mthca_unmap_eq_regs(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev)) {
		iounmap(dev->eq_regs.arbel.eq_set_ci_base);
		iounmap(dev->eq_regs.arbel.eq_arm);
		iounmap(dev->clr_base);
	} else {
		iounmap(dev->eq_regs.tavor.ecr_base);
		iounmap(dev->clr_base);
	}
}

int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
	int ret;

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table.  This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 32 bytes of context
	 * memory, or 1 KB total.
	 */
	dev->eq_table.icm_virt = icm_virt;
	dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!dev->eq_table.icm_page)
		return -ENOMEM;
	dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
		__free_page(dev->eq_table.icm_page);
		return -ENOMEM;
	}

	ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt);
	if (ret) {
		pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(dev->eq_table.icm_page);
	}

	return ret;
}

void mthca_unmap_eq_icm(struct mthca_dev *dev)
{
	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1);
	pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(dev->eq_table.icm_page);
}

int mthca_init_eq_table(struct mthca_dev *dev)
{
	int err;
	u8 intr;
	int i;

	err = mthca_alloc_init(&dev->eq_table.alloc,
			       dev->limits.num_eqs,
			       dev->limits.num_eqs - 1,
			       dev->limits.reserved_eqs);
	if (err)
		return err;

	err = mthca_map_eq_regs(dev);
	if (err)
		goto err_out_free;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		dev->eq_table.clr_mask = 0;
	} else {
		dev->eq_table.clr_mask =
			swab32(1 << (dev->eq_table.inta_pin & 31));
		dev->eq_table.clr_int  = dev->clr_base +
			(dev->eq_table.inta_pin < 32 ? 4 : 0);
	}
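	/*
	 * The clear register set up above is a 64-bit big-endian
	 * bitmap of interrupt pins: the word holding the bit for
	 * inta_pin sits at offset 4 for pins 0-31 and offset 0
	 * otherwise, and clr_mask is byte-swapped in advance so the
	 * plain writel() in the interrupt handlers sets that bit.
	 */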

	dev->eq_table.arm_mask = 0;

	intr = dev->eq_table.inta_pin;

	err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
	if (err)
		goto err_out_unmap;

	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
	if (err)
		goto err_out_comp;

	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
	if (err)
		goto err_out_async;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		static const char *eq_name[] = {
			[MTHCA_EQ_COMP]  = DRV_NAME "-comp",
			[MTHCA_EQ_ASYNC] = DRV_NAME "-async",
			[MTHCA_EQ_CMD]   = DRV_NAME "-cmd"
		};

		for (i = 0; i < MTHCA_NUM_EQ; ++i) {
			snprintf(dev->eq_table.eq[i].irq_name,
				 IB_DEVICE_NAME_MAX,
				 "%s@pci:%s", eq_name[i],
				 pci_name(dev->pdev));
			err = request_irq(dev->eq_table.eq[i].msi_x_vector,
					  mthca_is_memfree(dev) ?
					  mthca_arbel_msi_x_interrupt :
					  mthca_tavor_msi_x_interrupt,
					  0, dev->eq_table.eq[i].irq_name,
					  dev->eq_table.eq + i);
			if (err)
				goto err_out_cmd;
			dev->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
			 DRV_NAME "@pci:%s", pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq,
				  mthca_is_memfree(dev) ?
				  mthca_arbel_interrupt :
				  mthca_tavor_interrupt,
				  IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
		if (err)
			goto err_out_cmd;
		dev->eq_table.have_irq = 1;
	}

	err = mthca_MAP_EQ(dev, async_mask(dev),
			   0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	if (err)
		mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);

	err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
			   0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
	if (err)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_is_memfree(dev))
			arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
		else
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);

	return 0;

err_out_cmd:
	mthca_free_irqs(dev);
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

err_out_async:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

err_out_comp:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

err_out_unmap:
	mthca_unmap_eq_regs(dev);

err_out_free:
	mthca_alloc_cleanup(&dev->eq_table.alloc);
	return err;
}

void mthca_cleanup_eq_table(struct mthca_dev *dev)
{
	int i;

	mthca_free_irqs(dev);

	mthca_MAP_EQ(dev, async_mask(dev),
		     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
		     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		mthca_free_eq(dev, &dev->eq_table.eq[i]);

	mthca_unmap_eq_regs(dev);

	mthca_alloc_cleanup(&dev->eq_table.alloc);
}