xref: /linux/drivers/net/ethernet/amd/pds_core/adminq.c (revision 85502b2214d50ba0ddf2a5fb454e4d28a160d175)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2023 Advanced Micro Devices, Inc */
3 
4 #include <linux/dynamic_debug.h>
5 
6 #include "core.h"
7 
pdsc_process_notifyq(struct pdsc_qcq * qcq)8 static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
9 {
10 	union pds_core_notifyq_comp *comp;
11 	struct pdsc *pdsc = qcq->pdsc;
12 	struct pdsc_cq *cq = &qcq->cq;
13 	struct pdsc_cq_info *cq_info;
14 	int nq_work = 0;
15 	u64 eid;
16 
17 	cq_info = &cq->info[cq->tail_idx];
18 	comp = cq_info->comp;
19 	eid = le64_to_cpu(comp->event.eid);
20 	while (eid > pdsc->last_eid) {
21 		u16 ecode = le16_to_cpu(comp->event.ecode);
22 
23 		switch (ecode) {
24 		case PDS_EVENT_LINK_CHANGE:
25 			dev_info(pdsc->dev, "NotifyQ LINK_CHANGE ecode %d eid %lld\n",
26 				 ecode, eid);
27 			pdsc_notify(PDS_EVENT_LINK_CHANGE, comp);
28 			break;
29 
30 		case PDS_EVENT_RESET:
31 			dev_info(pdsc->dev, "NotifyQ RESET ecode %d eid %lld\n",
32 				 ecode, eid);
33 			pdsc_notify(PDS_EVENT_RESET, comp);
34 			break;
35 
36 		case PDS_EVENT_XCVR:
37 			dev_info(pdsc->dev, "NotifyQ XCVR ecode %d eid %lld\n",
38 				 ecode, eid);
39 			break;
40 
41 		default:
42 			dev_info(pdsc->dev, "NotifyQ ecode %d eid %lld\n",
43 				 ecode, eid);
44 			break;
45 		}
46 
47 		pdsc->last_eid = eid;
48 		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
49 		cq_info = &cq->info[cq->tail_idx];
50 		comp = cq_info->comp;
51 		eid = le64_to_cpu(comp->event.eid);
52 
53 		nq_work++;
54 	}
55 
56 	qcq->accum_work += nq_work;
57 
58 	return nq_work;
59 }
60 
pdsc_adminq_inc_if_up(struct pdsc * pdsc)61 static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc)
62 {
63 	if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) ||
64 	    pdsc->state & BIT_ULL(PDSC_S_FW_DEAD))
65 		return false;
66 
67 	return refcount_inc_not_zero(&pdsc->adminq_refcnt);
68 }
69 
/**
 * pdsc_process_adminq() - service NotifyQ events and AdminQ completions
 * @qcq: the AdminQ queue/completion-queue pair to service
 *
 * Runs from the work item queued by pdsc_adminq_isr().  First drains any
 * pending NotifyQ events (the NotifyQ is serviced from the same interrupt
 * as the AdminQ), then walks the AdminQ completion ring delivering results
 * to waiting posters, and finally returns the interrupt credits.
 */
void pdsc_process_adminq(struct pdsc_qcq *qcq)
{
	union pds_core_adminq_comp *comp;
	struct pdsc_queue *q = &qcq->q;
	struct pdsc *pdsc = qcq->pdsc;
	struct pdsc_cq *cq = &qcq->cq;
	struct pdsc_q_info *q_info;
	unsigned long irqflags;
	int nq_work = 0;
	int aq_work = 0;

	/* Don't process AdminQ when it's not up */
	if (!pdsc_adminq_inc_if_up(pdsc)) {
		dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
			__func__);
		return;
	}

	/* Check for NotifyQ event */
	nq_work = pdsc_process_notifyq(&pdsc->notifyqcq);

	/* Check for empty queue, which can happen if the interrupt was
	 * for a NotifyQ event and there are no new AdminQ completions.
	 */
	if (q->tail_idx == q->head_idx)
		goto credits;

	/* Find the first completion to clean,
	 * run the callback in the related q_info,
	 * and continue while we still match done color
	 */
	spin_lock_irqsave(&pdsc->adminq_lock, irqflags);
	comp = cq->info[cq->tail_idx].comp;
	while (pdsc_color_match(comp->color, cq->done_color)) {
		q_info = &q->info[q->tail_idx];
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

		/* Copy the completion out for the poster and wake it,
		 * unless pdsc_adminq_post() already gave up on this slot
		 * (it marks the completion done on timeout).
		 */
		if (!completion_done(&q_info->completion)) {
			memcpy(q_info->dest, comp, sizeof(*comp));
			complete(&q_info->completion);
		}

		/* The expected color flips each time the ring wraps;
		 * flip before advancing past the last descriptor.
		 */
		if (cq->tail_idx == cq->num_descs - 1)
			cq->done_color = !cq->done_color;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
		comp = cq->info[cq->tail_idx].comp;

		aq_work++;
	}
	spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);

	qcq->accum_work += aq_work;

credits:
	/* Return the interrupt credits, one for each completion */
	pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
			      nq_work + aq_work,
			      PDS_CORE_INTR_CRED_REARM);
	refcount_dec(&pdsc->adminq_refcnt);
}
130 
pdsc_work_thread(struct work_struct * work)131 void pdsc_work_thread(struct work_struct *work)
132 {
133 	struct pdsc_qcq *qcq = container_of(work, struct pdsc_qcq, work);
134 
135 	pdsc_process_adminq(qcq);
136 }
137 
pdsc_adminq_isr(int irq,void * data)138 irqreturn_t pdsc_adminq_isr(int irq, void *data)
139 {
140 	struct pdsc *pdsc = data;
141 	struct pdsc_qcq *qcq;
142 
143 	/* Don't process AdminQ when it's not up */
144 	if (!pdsc_adminq_inc_if_up(pdsc)) {
145 		dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
146 			__func__);
147 		return IRQ_HANDLED;
148 	}
149 
150 	qcq = &pdsc->adminqcq;
151 	queue_work(pdsc->wq, &qcq->work);
152 	refcount_dec(&pdsc->adminq_refcnt);
153 
154 	return IRQ_HANDLED;
155 }
156 
__pdsc_adminq_post(struct pdsc * pdsc,struct pdsc_qcq * qcq,union pds_core_adminq_cmd * cmd,union pds_core_adminq_comp * comp)157 static int __pdsc_adminq_post(struct pdsc *pdsc,
158 			      struct pdsc_qcq *qcq,
159 			      union pds_core_adminq_cmd *cmd,
160 			      union pds_core_adminq_comp *comp)
161 {
162 	struct pdsc_queue *q = &qcq->q;
163 	struct pdsc_q_info *q_info;
164 	unsigned long irqflags;
165 	unsigned int avail;
166 	int index;
167 	int ret;
168 
169 	spin_lock_irqsave(&pdsc->adminq_lock, irqflags);
170 
171 	/* Check for space in the queue */
172 	avail = q->tail_idx;
173 	if (q->head_idx >= avail)
174 		avail += q->num_descs - q->head_idx - 1;
175 	else
176 		avail -= q->head_idx + 1;
177 	if (!avail) {
178 		ret = -ENOSPC;
179 		goto err_out_unlock;
180 	}
181 
182 	/* Check that the FW is running */
183 	if (!pdsc_is_fw_running(pdsc)) {
184 		if (pdsc->info_regs) {
185 			u8 fw_status =
186 				ioread8(&pdsc->info_regs->fw_status);
187 
188 			dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
189 				 __func__, fw_status);
190 		} else {
191 			dev_info(pdsc->dev, "%s: post failed - BARs not setup\n",
192 				 __func__);
193 		}
194 		ret = -ENXIO;
195 
196 		goto err_out_unlock;
197 	}
198 
199 	/* Post the request */
200 	index = q->head_idx;
201 	q_info = &q->info[index];
202 	q_info->dest = comp;
203 	memcpy(q_info->desc, cmd, sizeof(*cmd));
204 	reinit_completion(&q_info->completion);
205 
206 	dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
207 		q->head_idx, q->tail_idx);
208 	dev_dbg(pdsc->dev, "post admin queue command:\n");
209 	dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
210 			 cmd, sizeof(*cmd), true);
211 
212 	q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
213 
214 	pds_core_dbell_ring(pdsc->kern_dbpage,
215 			    q->hw_type, q->dbval | q->head_idx);
216 	ret = index;
217 
218 err_out_unlock:
219 	spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);
220 	return ret;
221 }
222 
/**
 * pdsc_adminq_post() - post an AdminQ command and wait for its completion
 * @pdsc: device instance
 * @cmd: the command to post
 * @comp: buffer that receives the completion data
 * @fast_poll: if true, keep the short poll interval instead of backing off
 *
 * The wait is timesliced so FW failure can be detected early rather than
 * only after the full devcmd timeout expires.
 *
 * Return: 0 on success; -ENXIO if the adminq or FW is unavailable;
 * -ETIMEDOUT if no completion arrived before devcmd_timeout; otherwise
 * an errno translated from a non-zero completion status.
 */
int pdsc_adminq_post(struct pdsc *pdsc,
		     union pds_core_adminq_cmd *cmd,
		     union pds_core_adminq_comp *comp,
		     bool fast_poll)
{
	unsigned long poll_interval = 200;	/* usecs; doubles per miss when !fast_poll */
	unsigned long poll_jiffies;
	unsigned long time_limit;
	unsigned long time_start;
	unsigned long time_done;
	unsigned long remaining;
	struct completion *wc;
	int err = 0;
	int index;

	/* Hold a reference so the adminq can't be torn down while we wait */
	if (!pdsc_adminq_inc_if_up(pdsc)) {
		dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n",
			__func__, cmd->opcode);
		return -ENXIO;
	}

	index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp);
	if (index < 0) {
		err = index;
		goto err_out;
	}

	wc = &pdsc->adminqcq.q.info[index].completion;
	time_start = jiffies;
	time_limit = time_start + HZ * pdsc->devcmd_timeout;
	do {
		/* Timeslice the actual wait to catch IO errors etc early */
		poll_jiffies = usecs_to_jiffies(poll_interval);
		remaining = wait_for_completion_timeout(wc, poll_jiffies);
		if (remaining)
			break;

		/* Give up immediately if the FW died while we were waiting */
		if (!pdsc_is_fw_running(pdsc)) {
			if (pdsc->info_regs) {
				u8 fw_status =
					ioread8(&pdsc->info_regs->fw_status);

				dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
					__func__, fw_status);
			} else {
				dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n",
					__func__);
			}
			err = -ENXIO;
			break;
		}

		/* When fast_poll is not requested, prevent aggressive polling
		 * on failures due to timeouts by doing exponential back off.
		 */
		if (!fast_poll && poll_interval < PDSC_ADMINQ_MAX_POLL_INTERVAL)
			poll_interval <<= 1;
	} while (time_before(jiffies, time_limit));
	time_done = jiffies;
	dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
		__func__, jiffies_to_msecs(time_done - time_start));

	/* Check the results and clear an un-completed timeout: marking the
	 * completion done here tells pdsc_process_adminq() not to copy a
	 * late FW response into the caller's comp buffer after we return.
	 */
	if (time_after_eq(time_done, time_limit) && !completion_done(wc)) {
		err = -ETIMEDOUT;
		complete(wc);
	}

	dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
	dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	/* Only trust comp->status if the completion actually arrived */
	if (remaining && comp->status)
		err = pdsc_err_to_errno(comp->status);

err_out:
	if (err) {
		dev_dbg(pdsc->dev, "%s: opcode %d status %d err %pe\n",
			__func__, cmd->opcode, comp->status, ERR_PTR(err));
		/* ENXIO/ETIMEDOUT suggest FW trouble; kick the health check */
		if (err == -ENXIO || err == -ETIMEDOUT)
			queue_work(pdsc->wq, &pdsc->health_work);
	}

	refcount_dec(&pdsc->adminq_refcnt);

	return err;
}
EXPORT_SYMBOL_GPL(pdsc_adminq_post);
311