1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2023 Advanced Micro Devices, Inc */
3
4 #include <linux/pci.h>
5 #include <linux/vmalloc.h>
6
7 #include "core.h"
8
/* Chain of clients to be called back for pds_core events; see pdsc_notify() */
static BLOCKING_NOTIFIER_HEAD(pds_notify_chain);
10
pdsc_register_notify(struct notifier_block * nb)11 int pdsc_register_notify(struct notifier_block *nb)
12 {
13 return blocking_notifier_chain_register(&pds_notify_chain, nb);
14 }
15 EXPORT_SYMBOL_GPL(pdsc_register_notify);
16
/**
 * pdsc_unregister_notify() - Remove a client from the pds_core event chain
 * @nb: notifier block previously passed to pdsc_register_notify()
 */
void pdsc_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&pds_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(pdsc_unregister_notify);
22
/**
 * pdsc_notify() - Deliver an event to all registered pds_core clients
 * @event: event code (e.g. PDS_EVENT_RESET)
 * @data:  event payload handed to each notifier callback
 */
void pdsc_notify(unsigned long event, void *data)
{
	blocking_notifier_call_chain(&pds_notify_chain, event, data);
}
27
pdsc_intr_free(struct pdsc * pdsc,int index)28 void pdsc_intr_free(struct pdsc *pdsc, int index)
29 {
30 struct pdsc_intr_info *intr_info;
31
32 if (index >= pdsc->nintrs || index < 0) {
33 WARN(true, "bad intr index %d\n", index);
34 return;
35 }
36
37 intr_info = &pdsc->intr_info[index];
38 if (!intr_info->vector)
39 return;
40 dev_dbg(pdsc->dev, "%s: idx %d vec %d name %s\n",
41 __func__, index, intr_info->vector, intr_info->name);
42
43 pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);
44 pds_core_intr_clean(&pdsc->intr_ctrl[index]);
45
46 free_irq(intr_info->vector, intr_info->data);
47
48 memset(intr_info, 0, sizeof(*intr_info));
49 }
50
/**
 * pdsc_intr_alloc() - Claim a free interrupt slot and request its irq
 * @pdsc:    core device struct
 * @name:    name to record for the irq
 * @handler: isr to attach
 * @data:    context pointer passed to @handler and free_irq()
 *
 * Return: the slot index on success, negative errno on failure.
 */
int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
		    irq_handler_t handler, void *data)
{
	struct pdsc_intr_info *ii;
	unsigned int idx = 0;
	int rc;

	/* Find the first available interrupt: vector == 0 means free */
	while (idx < pdsc->nintrs && pdsc->intr_info[idx].vector)
		idx++;
	if (idx >= pdsc->nintrs) {
		dev_warn(pdsc->dev, "%s: no intr, index=%d nintrs=%d\n",
			 __func__, idx, pdsc->nintrs);
		return -ENOSPC;
	}

	pds_core_intr_clean_flags(&pdsc->intr_ctrl[idx],
				  PDS_CORE_INTR_CRED_RESET_COALESCE);

	ii = &pdsc->intr_info[idx];

	ii->index = idx;
	ii->data = data;
	strscpy(ii->name, name, sizeof(ii->name));

	/* Translate the slot into the OS vector number */
	rc = pci_irq_vector(pdsc->pdev, idx);
	if (rc < 0) {
		dev_err(pdsc->dev, "failed to get intr vector index %d: %pe\n",
			idx, ERR_PTR(rc));
		goto out_free_intr;
	}
	ii->vector = rc;

	/* Start with the device's interrupt cleaned and masked */
	pds_core_intr_clean(&pdsc->intr_ctrl[idx]);
	pds_core_intr_mask_assert(&pdsc->intr_ctrl[idx], 1);
	pds_core_intr_mask(&pdsc->intr_ctrl[idx], PDS_CORE_INTR_MASK_SET);

	/* Hook the isr up under the caller's chosen name */
	rc = request_irq(ii->vector, handler, 0, ii->name, data);
	if (rc) {
		dev_err(pdsc->dev, "failed to get intr irq vector %d: %pe\n",
			ii->vector, ERR_PTR(rc));
		goto out_free_intr;
	}

	return idx;

out_free_intr:
	pdsc_intr_free(pdsc, idx);
	return rc;
}
105
pdsc_qcq_intr_free(struct pdsc * pdsc,struct pdsc_qcq * qcq)106 static void pdsc_qcq_intr_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
107 {
108 if (!(qcq->flags & PDS_CORE_QCQ_F_INTR) ||
109 qcq->intx == PDS_CORE_INTR_INDEX_NOT_ASSIGNED)
110 return;
111
112 pdsc_intr_free(pdsc, qcq->intx);
113 qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
114 }
115
pdsc_qcq_intr_alloc(struct pdsc * pdsc,struct pdsc_qcq * qcq)116 static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
117 {
118 char name[PDSC_INTR_NAME_MAX_SZ];
119 int index;
120
121 if (!(qcq->flags & PDS_CORE_QCQ_F_INTR)) {
122 qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
123 return 0;
124 }
125
126 snprintf(name, sizeof(name), "%s-%d-%s",
127 PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
128 index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, pdsc);
129 if (index < 0)
130 return index;
131 qcq->intx = index;
132 qcq->cq.bound_intr = &pdsc->intr_info[index];
133
134 return 0;
135 }
136
pdsc_qcq_free(struct pdsc * pdsc,struct pdsc_qcq * qcq)137 void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
138 {
139 struct device *dev = pdsc->dev;
140
141 if (!(qcq && qcq->pdsc))
142 return;
143
144 pdsc_debugfs_del_qcq(qcq);
145
146 pdsc_qcq_intr_free(pdsc, qcq);
147
148 if (qcq->q_base)
149 dma_free_coherent(dev, qcq->q_size,
150 qcq->q_base, qcq->q_base_pa);
151
152 if (qcq->cq_base)
153 dma_free_coherent(dev, qcq->cq_size,
154 qcq->cq_base, qcq->cq_base_pa);
155
156 vfree(qcq->cq.info);
157 vfree(qcq->q.info);
158
159 memset(qcq, 0, sizeof(*qcq));
160 }
161
/* Point each q_info entry at its descriptor within the ring at @base,
 * and initialize the per-descriptor completion.
 */
static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
{
	unsigned int i;

	q->base = base;
	q->base_pa = base_pa;

	for (i = 0; i < q->num_descs; i++) {
		struct pdsc_q_info *qi = &q->info[i];

		qi->desc = base + (i * q->desc_size);
		init_completion(&qi->completion);
	}
}
175
/* Point each cq_info entry at its completion descriptor within @base */
static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
{
	unsigned int i;

	cq->base = base;
	cq->base_pa = base_pa;

	for (i = 0; i < cq->num_descs; i++) {
		struct pdsc_cq_info *ci = &cq->info[i];

		ci->comp = base + (i * cq->desc_size);
	}
}
187
/**
 * pdsc_qcq_alloc() - Allocate and initialize a queue/completion-queue pair
 * @pdsc:         core device struct
 * @type:         device queue type (PDS_CORE_QTYPE_*)
 * @index:        queue index, appended to @name to form q.name
 * @name:         base name for the queue
 * @flags:        PDS_CORE_QCQ_F_* flags (interrupt use, notifyq layout)
 * @num_descs:    number of descriptors used for both the q and the cq
 * @desc_size:    size in bytes of one q descriptor
 * @cq_desc_size: size in bytes of one cq descriptor
 * @pid:          value stored in q.pid (NOTE(review): presumably a
 *                doorbell/process id - confirm against callers)
 * @qcq:          caller-owned qcq struct to fill in
 *
 * With PDS_CORE_QCQ_F_NOTIFYQ the q and cq rings share one contiguous
 * DMA allocation; otherwise each gets its own. Rings are over-allocated
 * by a page so they can be aligned to PDS_PAGE_SIZE.
 *
 * Return: 0 on success, negative errno on failure; on failure the
 * partially built @qcq is torn down and an error message is logged.
 */
int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
		   const char *name, unsigned int flags, unsigned int num_descs,
		   unsigned int desc_size, unsigned int cq_desc_size,
		   unsigned int pid, struct pdsc_qcq *qcq)
{
	struct device *dev = pdsc->dev;
	void *q_base, *cq_base;
	dma_addr_t cq_base_pa;
	dma_addr_t q_base_pa;
	int err;

	/* Per-descriptor tracking array for the q */
	qcq->q.info = vcalloc(num_descs, sizeof(*qcq->q.info));
	if (!qcq->q.info) {
		err = -ENOMEM;
		goto err_out;
	}

	qcq->pdsc = pdsc;
	qcq->flags = flags;
	INIT_WORK(&qcq->work, pdsc_work_thread);

	qcq->q.type = type;
	qcq->q.index = index;
	qcq->q.num_descs = num_descs;
	qcq->q.desc_size = desc_size;
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->q.pid = pid;
	snprintf(qcq->q.name, sizeof(qcq->q.name), "%s%u", name, index);

	/* Attach an interrupt if PDS_CORE_QCQ_F_INTR is set */
	err = pdsc_qcq_intr_alloc(pdsc, qcq);
	if (err)
		goto err_out_free_q_info;

	/* Per-descriptor tracking array for the cq */
	qcq->cq.info = vcalloc(num_descs, sizeof(*qcq->cq.info));
	if (!qcq->cq.info) {
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	qcq->cq.num_descs = num_descs;
	qcq->cq.desc_size = cq_desc_size;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;

	if (flags & PDS_CORE_QCQ_F_NOTIFYQ) {
		/* q & cq need to be contiguous in case of notifyq */
		qcq->q_size = PDS_PAGE_SIZE +
			      ALIGN(num_descs * desc_size, PDS_PAGE_SIZE) +
			      ALIGN(num_descs * cq_desc_size, PDS_PAGE_SIZE);
		qcq->q_base = dma_alloc_coherent(dev,
						 qcq->q_size + qcq->cq_size,
						 &qcq->q_base_pa,
						 GFP_KERNEL);
		if (!qcq->q_base) {
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		/* Page-align the q ring within the padded allocation */
		q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
		q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
		pdsc_q_map(&qcq->q, q_base, q_base_pa);

		/* cq ring starts on the next page boundary after the q ring */
		cq_base = PTR_ALIGN(q_base +
				    ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
				    PDS_PAGE_SIZE);
		cq_base_pa = ALIGN(qcq->q_base_pa +
				   ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
				   PDS_PAGE_SIZE);

	} else {
		/* q DMA descriptors */
		qcq->q_size = PDS_PAGE_SIZE + (num_descs * desc_size);
		qcq->q_base = dma_alloc_coherent(dev, qcq->q_size,
						 &qcq->q_base_pa,
						 GFP_KERNEL);
		if (!qcq->q_base) {
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
		q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
		pdsc_q_map(&qcq->q, q_base, q_base_pa);

		/* cq DMA descriptors */
		qcq->cq_size = PDS_PAGE_SIZE + (num_descs * cq_desc_size);
		qcq->cq_base = dma_alloc_coherent(dev, qcq->cq_size,
						  &qcq->cq_base_pa,
						  GFP_KERNEL);
		if (!qcq->cq_base) {
			err = -ENOMEM;
			goto err_out_free_q;
		}
		cq_base = PTR_ALIGN(qcq->cq_base, PDS_PAGE_SIZE);
		cq_base_pa = ALIGN(qcq->cq_base_pa, PDS_PAGE_SIZE);
	}

	pdsc_cq_map(&qcq->cq, cq_base, cq_base_pa);
	qcq->cq.bound_q = &qcq->q;

	pdsc_debugfs_add_qcq(pdsc, qcq);

	return 0;

	/* Unwind in reverse order of construction */
err_out_free_q:
	dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
err_out_free_cq_info:
	vfree(qcq->cq.info);
err_out_free_irq:
	pdsc_qcq_intr_free(pdsc, qcq);
err_out_free_q_info:
	vfree(qcq->q.info);
	memset(qcq, 0, sizeof(*qcq));
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}
304
/* Undo pdsc_core_init: free both qcqs and unmap the kernel doorbell page */
static void pdsc_core_uninit(struct pdsc *pdsc)
{
	pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
	pdsc_qcq_free(pdsc, &pdsc->adminqcq);

	/* NULL marks the doorbell page as unmapped for later checks */
	if (pdsc->kern_dbpage) {
		iounmap(pdsc->kern_dbpage);
		pdsc->kern_dbpage = NULL;
	}
}
315
/* Bring up the core device: allocate the adminq and notifyq pairs, send
 * the INIT devcmd describing them to the device, then use the returned
 * data to map the kernel doorbell page and record the hw queue ids.
 */
static int pdsc_core_init(struct pdsc *pdsc)
{
	union pds_core_dev_comp comp = {};
	union pds_core_dev_cmd cmd = {
		.init.opcode = PDS_CORE_CMD_INIT,
	};
	struct pds_core_dev_init_data_out cido;
	struct pds_core_dev_init_data_in cidi;
	u32 dbid_count;
	u32 dbpage_num;
	int numdescs;
	size_t sz;
	int err;

	numdescs = PDSC_ADMINQ_MAX_LENGTH;
	err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
			     PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
			     numdescs,
			     sizeof(union pds_core_adminq_cmd),
			     sizeof(union pds_core_adminq_comp),
			     0, &pdsc->adminqcq);
	if (err)
		return err;

	/* notifyq shares the adminq's interrupt; F_NOTIFYQ makes its
	 * q and cq rings contiguous
	 */
	err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
			     PDS_CORE_QCQ_F_NOTIFYQ,
			     PDSC_NOTIFYQ_LENGTH,
			     sizeof(struct pds_core_notifyq_cmd),
			     sizeof(union pds_core_notifyq_comp),
			     0, &pdsc->notifyqcq);
	if (err)
		goto err_out_uninit;

	/* Describe the new queues to the device */
	cidi.adminq_q_base = cpu_to_le64(pdsc->adminqcq.q_base_pa);
	cidi.adminq_cq_base = cpu_to_le64(pdsc->adminqcq.cq_base_pa);
	cidi.notifyq_cq_base = cpu_to_le64(pdsc->notifyqcq.cq.base_pa);
	cidi.flags = cpu_to_le32(PDS_CORE_QINIT_F_IRQ | PDS_CORE_QINIT_F_ENA);
	cidi.intr_index = cpu_to_le16(pdsc->adminqcq.intx);
	cidi.adminq_ring_size = ilog2(pdsc->adminqcq.q.num_descs);
	cidi.notifyq_ring_size = ilog2(pdsc->notifyqcq.q.num_descs);

	mutex_lock(&pdsc->devcmd_lock);

	/* Copy the init data into the devcmd data registers, bounded by
	 * whichever of the two regions is smaller
	 */
	sz = min_t(size_t, sizeof(cidi), sizeof(pdsc->cmd_regs->data));
	memcpy_toio(&pdsc->cmd_regs->data, &cidi, sz);

	err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
	if (!err) {
		sz = min_t(size_t, sizeof(cido), sizeof(pdsc->cmd_regs->data));
		memcpy_fromio(&cido, &pdsc->cmd_regs->data, sz);
	}

	mutex_unlock(&pdsc->devcmd_lock);
	if (err) {
		dev_err(pdsc->dev, "Device init command failed: %pe\n",
			ERR_PTR(err));
		goto err_out_uninit;
	}

	pdsc->hw_index = le32_to_cpu(cido.core_hw_index);

	/* Map the kernel doorbell page for this core's hw index */
	dbid_count = le32_to_cpu(pdsc->dev_ident.ndbpgs_per_lif);
	dbpage_num = pdsc->hw_index * dbid_count;
	pdsc->kern_dbpage = pdsc_map_dbpage(pdsc, dbpage_num);
	if (!pdsc->kern_dbpage) {
		dev_err(pdsc->dev, "Cannot map dbpage, aborting\n");
		err = -ENOMEM;
		goto err_out_uninit;
	}

	/* Record the hw ids and doorbell values the device assigned */
	pdsc->adminqcq.q.hw_type = cido.adminq_hw_type;
	pdsc->adminqcq.q.hw_index = le32_to_cpu(cido.adminq_hw_index);
	pdsc->adminqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->adminqcq.q.hw_index);

	pdsc->notifyqcq.q.hw_type = cido.notifyq_hw_type;
	pdsc->notifyqcq.q.hw_index = le32_to_cpu(cido.notifyq_hw_index);
	pdsc->notifyqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->notifyqcq.q.hw_index);

	/* No notifyq events have been seen yet */
	pdsc->last_eid = 0;

	return 0;

err_out_uninit:
	pdsc_core_uninit(pdsc);
	return err;
}
402
/* Default per-VIF-type settings, indexed by enum pds_core_vif_types;
 * entries without a .name are treated as unused by pdsc_viftypes_init()
 */
static struct pdsc_viftype pdsc_viftype_defaults[] = {
	[PDS_DEV_TYPE_FWCTL] = { .name = PDS_DEV_TYPE_FWCTL_STR,
				 .enabled = true,
				 .vif_id = PDS_DEV_TYPE_FWCTL,
				 .dl_id = -1 },
	[PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
				 .vif_id = PDS_DEV_TYPE_VDPA,
				 .dl_id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET },
	[PDS_DEV_TYPE_MAX] = {}
};
413
pdsc_viftypes_init(struct pdsc * pdsc)414 static int pdsc_viftypes_init(struct pdsc *pdsc)
415 {
416 enum pds_core_vif_types vt;
417
418 pdsc->viftype_status = kzalloc_objs(*pdsc->viftype_status,
419 ARRAY_SIZE(pdsc_viftype_defaults));
420 if (!pdsc->viftype_status)
421 return -ENOMEM;
422
423 for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
424 bool vt_support;
425
426 if (!pdsc_viftype_defaults[vt].name)
427 continue;
428
429 /* Grab the defaults */
430 pdsc->viftype_status[vt] = pdsc_viftype_defaults[vt];
431
432 /* See what the Core device has for support */
433 vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
434
435 dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
436 pdsc->viftype_status[vt].name,
437 vt_support ? "" : "not ");
438
439 pdsc->viftype_status[vt].supported = vt_support;
440 }
441
442 return 0;
443 }
444
/**
 * pdsc_setup() - Bring up the device, core queues, and (optionally) VIFs
 * @pdsc: core device struct
 * @init: true on first-time setup; false for fw-recovery setup, which
 *        skips rebuilding the VIF tables
 *
 * Return: 0 on success, negative errno on failure (everything built so
 * far is torn down again).
 */
int pdsc_setup(struct pdsc *pdsc, bool init)
{
	int rc;

	rc = pdsc_dev_init(pdsc);
	if (rc)
		return rc;

	/* Set up the Core with the AdminQ and NotifyQ info */
	rc = pdsc_core_init(pdsc);
	if (rc)
		goto teardown;

	/* Set up the VIFs */
	if (init) {
		rc = pdsc_viftypes_init(pdsc);
		if (rc)
			goto teardown;

		pdsc_debugfs_add_viftype(pdsc);
	}

	/* adminq is now usable: refcount live, fw no longer marked dead */
	refcount_set(&pdsc->adminq_refcnt, 1);
	clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
	return 0;

teardown:
	pdsc_teardown(pdsc, init);
	return rc;
}
475
/**
 * pdsc_teardown() - Undo pdsc_setup()
 * @pdsc:     core device struct
 * @removing: true on driver remove; false during fw recovery, which
 *            keeps viftype_status for the next pdsc_setup()
 */
void pdsc_teardown(struct pdsc *pdsc, bool removing)
{
	/* Only the PF asks the device to reset */
	if (!pdsc->pdev->is_virtfn)
		pdsc_devcmd_reset(pdsc);
	/* Stop any queued adminq servicing before freeing the queues */
	if (pdsc->adminqcq.work.func)
		cancel_work_sync(&pdsc->adminqcq.work);

	pdsc_core_uninit(pdsc);

	if (removing) {
		kfree(pdsc->viftype_status);
		pdsc->viftype_status = NULL;
	}

	pdsc_dev_uninit(pdsc);

	/* Block further adminq use until the next successful setup */
	set_bit(PDSC_S_FW_DEAD, &pdsc->state);
}
494
pdsc_start(struct pdsc * pdsc)495 int pdsc_start(struct pdsc *pdsc)
496 {
497 pds_core_intr_mask(&pdsc->intr_ctrl[pdsc->adminqcq.intx],
498 PDS_CORE_INTR_MASK_CLEAR);
499
500 return 0;
501 }
502
pdsc_stop(struct pdsc * pdsc)503 void pdsc_stop(struct pdsc *pdsc)
504 {
505 int i;
506
507 if (!pdsc->intr_info)
508 return;
509
510 /* Mask interrupts that are in use */
511 for (i = 0; i < pdsc->nintrs; i++)
512 if (pdsc->intr_info[i].vector)
513 pds_core_intr_mask(&pdsc->intr_ctrl[i],
514 PDS_CORE_INTR_MASK_SET);
515 }
516
pdsc_adminq_wait_and_dec_once_unused(struct pdsc * pdsc)517 static void pdsc_adminq_wait_and_dec_once_unused(struct pdsc *pdsc)
518 {
519 /* The driver initializes the adminq_refcnt to 1 when the adminq is
520 * allocated and ready for use. Other users/requesters will increment
521 * the refcnt while in use. If the refcnt is down to 1 then the adminq
522 * is not in use and the refcnt can be cleared and adminq freed. Before
523 * calling this function the driver will set PDSC_S_FW_DEAD, which
524 * prevent subsequent attempts to use the adminq and increment the
525 * refcnt to fail. This guarantees that this function will eventually
526 * exit.
527 */
528 while (!refcount_dec_if_one(&pdsc->adminq_refcnt)) {
529 dev_dbg_ratelimited(pdsc->dev, "%s: adminq in use\n",
530 __func__);
531 cpu_relax();
532 }
533 }
534
pdsc_fw_down(struct pdsc * pdsc)535 void pdsc_fw_down(struct pdsc *pdsc)
536 {
537 union pds_core_notifyq_comp reset_event = {
538 .reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
539 .reset.state = 0,
540 };
541
542 if (test_and_set_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
543 dev_warn(pdsc->dev, "%s: already happening\n", __func__);
544 return;
545 }
546
547 if (pdsc->pdev->is_virtfn)
548 return;
549
550 pdsc_adminq_wait_and_dec_once_unused(pdsc);
551
552 /* Notify clients of fw_down */
553 if (pdsc->fw_reporter)
554 devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
555 pdsc_notify(PDS_EVENT_RESET, &reset_event);
556
557 pdsc_stop(pdsc);
558 pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
559 }
560
pdsc_fw_up(struct pdsc * pdsc)561 void pdsc_fw_up(struct pdsc *pdsc)
562 {
563 union pds_core_notifyq_comp reset_event = {
564 .reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
565 .reset.state = 1,
566 };
567 int err;
568
569 if (!test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
570 dev_err(pdsc->dev, "%s: fw not dead\n", __func__);
571 return;
572 }
573
574 if (pdsc->pdev->is_virtfn) {
575 clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
576 return;
577 }
578
579 err = pdsc_setup(pdsc, PDSC_SETUP_RECOVERY);
580 if (err)
581 goto err_out;
582
583 err = pdsc_start(pdsc);
584 if (err)
585 goto err_out;
586
587 /* Notify clients of fw_up */
588 pdsc->fw_recoveries++;
589 if (pdsc->fw_reporter)
590 devlink_health_reporter_state_update(pdsc->fw_reporter,
591 DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
592 pdsc_notify(PDS_EVENT_RESET, &reset_event);
593
594 return;
595
596 err_out:
597 pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
598 }
599
/* Worker for pci_reset_work: reset the PCI function from workqueue
 * context (queued by pdsc_check_pci_health()).
 */
void pdsc_pci_reset_thread(struct work_struct *work)
{
	struct pdsc *pdsc = container_of(work, struct pdsc, pci_reset_work);
	struct pci_dev *pdev = pdsc->pdev;

	/* Hold a device reference across the reset */
	pci_dev_get(pdev);
	pci_reset_function(pdev);
	pci_dev_put(pdev);
}
609
/* If the device's fw_status register reads as the BAD_PCI sentinel,
 * schedule a PCI function reset on the driver workqueue.
 */
static void pdsc_check_pci_health(struct pdsc *pdsc)
{
	u8 fw_status;

	/* some sort of teardown already in progress */
	if (!pdsc->info_regs)
		return;

	fw_status = ioread8(&pdsc->info_regs->fw_status);

	/* is PCI broken? */
	if (fw_status != PDS_RC_BAD_PCI)
		return;

	/* prevent deadlock between pdsc_reset_prepare and pdsc_health_thread */
	queue_work(pdsc->wq, &pdsc->pci_reset_work);
}
627
pdsc_health_thread(struct work_struct * work)628 void pdsc_health_thread(struct work_struct *work)
629 {
630 struct pdsc *pdsc = container_of(work, struct pdsc, health_work);
631 unsigned long mask;
632 bool healthy;
633
634 mutex_lock(&pdsc->config_lock);
635
636 /* Don't do a check when in a transition state */
637 mask = BIT_ULL(PDSC_S_INITING_DRIVER) |
638 BIT_ULL(PDSC_S_STOPPING_DRIVER);
639 if (pdsc->state & mask)
640 goto out_unlock;
641
642 healthy = pdsc_is_fw_good(pdsc);
643 dev_dbg(pdsc->dev, "%s: health %d fw_status %#02x fw_heartbeat %d\n",
644 __func__, healthy, pdsc->fw_status, pdsc->last_hb);
645
646 if (test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
647 if (healthy)
648 pdsc_fw_up(pdsc);
649 } else {
650 if (!healthy)
651 pdsc_fw_down(pdsc);
652 }
653
654 pdsc_check_pci_health(pdsc);
655
656 pdsc->fw_generation = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
657
658 out_unlock:
659 mutex_unlock(&pdsc->config_lock);
660 }
661