// SPDX-License-Identifier: GPL-2.0
/*
 * Data Object Exchange
 *	PCIe r6.0, sec 6.30 DOE
 *
 * Copyright (C) 2021 Huawei
 *	Jonathan Cameron <Jonathan.Cameron@huawei.com>
 *
 * Copyright (C) 2022 Intel Corporation
 *	Ira Weiny <ira.weiny@intel.com>
 */

#define dev_fmt(fmt) "DOE: " fmt

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/workqueue.h>

#include "pci.h"

#define PCI_DOE_PROTOCOL_DISCOVERY 0

/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
#define PCI_DOE_TIMEOUT HZ
#define PCI_DOE_POLL_INTERVAL	(PCI_DOE_TIMEOUT / 128)

#define PCI_DOE_FLAG_CANCEL	0
#define PCI_DOE_FLAG_DEAD	1

/* Max data object length is 2^18 dwords */
#define PCI_DOE_MAX_LENGTH	(1 << 18)

/**
 * struct pci_doe_mb - State for a single DOE mailbox
 *
 * This state is used to manage a single DOE mailbox capability.  All fields
 * should be considered opaque to consumers; after creation by
 * pci_doe_create_mb(), the structure is only passed into the helpers below.
 *
 * @pdev: PCI device this mailbox belongs to
 * @cap_offset: Capability offset
 * @prots: Array of protocols supported (encoded as long values)
 * @wq: Wait queue for work item
 * @work_queue: Queue of pci_doe_work items
 * @flags: Bit array of PCI_DOE_FLAG_* flags
 */
struct pci_doe_mb {
	struct pci_dev *pdev;
	u16 cap_offset;
	struct xarray prots;

	wait_queue_head_t wq;
	struct workqueue_struct *work_queue;
	unsigned long flags;
};

struct pci_doe_protocol {
	u16 vid;
	u8 type;
};

/**
 * struct pci_doe_task - represents a single query/response
 *
 * @prot: DOE Protocol
 * @request_pl: The request payload
 * @request_pl_sz: Size of the request payload (bytes)
 * @response_pl: The response payload
 * @response_pl_sz: Size of the response payload (bytes)
 * @rv: Return value.  Length of received response (bytes) or negative errno
 * @complete: Called when task is complete
 * @private: Private data for the consumer
 * @work: Used internally by the mailbox
 * @doe_mb: Used internally by the mailbox
 */
struct pci_doe_task {
	struct pci_doe_protocol prot;
	const __le32 *request_pl;
	size_t request_pl_sz;
	__le32 *response_pl;
	size_t response_pl_sz;
	int rv;
	void (*complete)(struct pci_doe_task *task);
	void *private;

	/* initialized by pci_doe_submit_task() */
	struct work_struct work;
	struct pci_doe_mb *doe_mb;
};

static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
{
	if (wait_event_timeout(doe_mb->wq,
			       test_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags),
			       timeout))
		return -EIO;
	return 0;
}

static void pci_doe_write_ctrl(struct pci_doe_mb *doe_mb, u32 val)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;

	pci_write_config_dword(pdev, offset + PCI_DOE_CTRL, val);
}

static int pci_doe_abort(struct pci_doe_mb *doe_mb)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	unsigned long timeout_jiffies;

	pci_dbg(pdev, "[%x] Issuing Abort\n", offset);

	timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
	pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_ABORT);

	do {
		int rc;
		u32 val;

		rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
		if (rc)
			return rc;
		pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);

		/* Abort success! */
		if (!FIELD_GET(PCI_DOE_STATUS_ERROR, val) &&
		    !FIELD_GET(PCI_DOE_STATUS_BUSY, val))
			return 0;

	} while (!time_after(jiffies, timeout_jiffies));

	/* Abort has timed out and the MB is dead */
	pci_err(pdev, "[%x] ABORT timed out\n", offset);
	return -EIO;
}

static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
			    struct pci_doe_task *task)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	size_t length, remainder;
	u32 val;
	int i;

	/*
	 * Check that the DOE Busy bit is not set.  If it is set, this could
	 * indicate that someone other than Linux (e.g. firmware) is using the
	 * mailbox.  Note it is expected that firmware and OS will negotiate
	 * access rights via an as-yet-undefined method.
	 */
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_BUSY, val))
		return -EBUSY;

	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
		return -EIO;

	/* Length is 2 DW of header + length of payload in DW */
	length = 2 + DIV_ROUND_UP(task->request_pl_sz, sizeof(__le32));
	if (length > PCI_DOE_MAX_LENGTH)
		return -EIO;
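	/*
	 * Per the Data Object Header 2 encoding (PCIe r6.0 sec 6.30.1), a
	 * Length value of 0 represents the maximum size of 2^18 dwords.
	 * E.g. an object occupying exactly PCI_DOE_MAX_LENGTH dwords
	 * (2 header dwords plus 2^18 - 2 payload dwords) is sent with
	 * Length == 0.
	 */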
	if (length == PCI_DOE_MAX_LENGTH)
		length = 0;

	/* Write DOE Header */
	val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
		FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
			       FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
					  length));

	/* Write payload */
	for (i = 0; i < task->request_pl_sz / sizeof(__le32); i++)
		pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
				       le32_to_cpu(task->request_pl[i]));

	/* Write last payload dword */
	remainder = task->request_pl_sz % sizeof(__le32);
	if (remainder) {
		val = 0;
		memcpy(&val, &task->request_pl[i], remainder);
		le32_to_cpus(&val);
		pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
	}

	pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);

	return 0;
}

static bool pci_doe_data_obj_ready(struct pci_doe_mb *doe_mb)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	u32 val;

	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val))
		return true;
	return false;
}

static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
{
	size_t length, payload_length, remainder, received;
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	int i = 0;
	u32 val;

	/* Read the first dword to get the protocol */
	pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
	if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) ||
	    (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) {
		dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n",
				    doe_mb->cap_offset, task->prot.vid, task->prot.type,
				    FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val),
				    FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val));
		return -EIO;
	}

	pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
	/* Read the second dword to get the length */
	pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
	pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);

	length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val);
	/* A value of 0x0 indicates max data object length */
	if (!length)
		length = PCI_DOE_MAX_LENGTH;
	if (length < 2)
		return -EIO;

	/* First 2 dwords have already been read */
	length -= 2;
	received = task->response_pl_sz;
	payload_length = DIV_ROUND_UP(task->response_pl_sz, sizeof(__le32));
	remainder = task->response_pl_sz % sizeof(__le32);

	/* remainder signifies number of data bytes in last payload dword */
	if (!remainder)
		remainder = sizeof(__le32);

	if (length < payload_length) {
		received = length * sizeof(__le32);
		payload_length = length;
		remainder = sizeof(__le32);
	}

	if (payload_length) {
		/* Read all payload dwords except the last */
		for (; i < payload_length - 1; i++) {
			pci_read_config_dword(pdev, offset + PCI_DOE_READ,
					      &val);
			task->response_pl[i] = cpu_to_le32(val);
			pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
		}

		/* Read last payload dword */
		pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
		cpu_to_le32s(&val);
		memcpy(&task->response_pl[i], &val, remainder);
		/* Prior to the last ack, ensure Data Object Ready */
		if (!pci_doe_data_obj_ready(doe_mb))
			return -EIO;
		pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
		i++;
	}

	/* Flush excess length */
	for (; i < length; i++) {
		pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
		pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
	}

	/* Final error check to pick up on any error raised since Data Object Ready */
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
		return -EIO;

	return received;
}

static void signal_task_complete(struct pci_doe_task *task, int rv)
{
	task->rv = rv;
	destroy_work_on_stack(&task->work);
	task->complete(task);
}

static void signal_task_abort(struct pci_doe_task *task, int rv)
{
	struct pci_doe_mb *doe_mb = task->doe_mb;
	struct pci_dev *pdev = doe_mb->pdev;

	if (pci_doe_abort(doe_mb)) {
		/*
		 * If the device can't process an abort, mark the mailbox dead
		 * so that no further tasks are submitted.
		 */
		pci_err(pdev, "[%x] Abort failed marking mailbox dead\n",
			doe_mb->cap_offset);
		set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
	}
	signal_task_complete(task, rv);
}

static void doe_statemachine_work(struct work_struct *work)
{
	struct pci_doe_task *task = container_of(work, struct pci_doe_task,
						 work);
	struct pci_doe_mb *doe_mb = task->doe_mb;
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	unsigned long timeout_jiffies;
	u32 val;
	int rc;

	if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) {
		signal_task_complete(task, -EIO);
		return;
	}

	/* Send request */
	rc = pci_doe_send_req(doe_mb, task);
	if (rc) {
		/*
		 * The specification does not provide any guidance on how to
		 * resolve conflicting requests from other entities.
		 * Furthermore, it is likely that a Busy status will not be
		 * detected most of the time.  Flag any detection of a Busy
		 * status as an error.
		 */
		if (rc == -EBUSY)
			dev_err_ratelimited(&pdev->dev, "[%x] busy detected; another entity is sending conflicting requests\n",
					    offset);
		signal_task_abort(task, rc);
		return;
	}

	timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
	/* Poll for response */
retry_resp:
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) {
		signal_task_abort(task, -EIO);
		return;
	}

	if (!FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) {
		if (time_after(jiffies, timeout_jiffies)) {
			signal_task_abort(task, -EIO);
			return;
		}
		rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
		if (rc) {
			signal_task_abort(task, rc);
			return;
		}
		goto retry_resp;
	}

	rc = pci_doe_recv_resp(doe_mb, task);
	if (rc < 0) {
		signal_task_abort(task, rc);
		return;
	}

	signal_task_complete(task, rc);
}

static void pci_doe_task_complete(struct pci_doe_task *task)
{
	complete(task->private);
}

static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u16 *vid,
			     u8 *protocol)
{
	u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
				    *index) |
			 FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_VER,
				    (capver >= 2) ? 2 : 0);
	__le32 request_pl_le = cpu_to_le32(request_pl);
	__le32 response_pl_le;
	u32 response_pl;
	int rc;

	rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_PROTOCOL_DISCOVERY,
		     &request_pl_le, sizeof(request_pl_le),
		     &response_pl_le, sizeof(response_pl_le));
	if (rc < 0)
		return rc;

	if (rc != sizeof(response_pl_le))
		return -EIO;

	response_pl = le32_to_cpu(response_pl_le);
	*vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
	*protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
			      response_pl);
	*index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX,
			   response_pl);

	return 0;
}

static void *pci_doe_xa_prot_entry(u16 vid, u8 prot)
{
	return xa_mk_value((vid << 8) | prot);
}

static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
{
	u8 index = 0;
	u8 xa_idx = 0;
	u32 hdr = 0;

	pci_read_config_dword(doe_mb->pdev, doe_mb->cap_offset, &hdr);

	do {
		int rc;
		u16 vid;
		u8 prot;

		rc = pci_doe_discovery(doe_mb, PCI_EXT_CAP_VER(hdr), &index,
				       &vid, &prot);
		if (rc)
			return rc;

		pci_dbg(doe_mb->pdev,
			"[%x] Found protocol %d vid: %x prot: %x\n",
			doe_mb->cap_offset, xa_idx, vid, prot);

		rc = xa_insert(&doe_mb->prots, xa_idx++,
			       pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL);
		if (rc)
			return rc;
	} while (index);

	return 0;
}

static void pci_doe_cancel_tasks(struct pci_doe_mb *doe_mb)
{
	/* Stop all pending work items from starting */
	set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);

	/* Cancel an in-progress work item, if necessary */
	set_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags);
	wake_up(&doe_mb->wq);
}

/**
 * pci_doe_create_mb() - Create a DOE mailbox object
 *
 * @pdev: PCI device to create the DOE mailbox for
 * @cap_offset: Offset of the DOE mailbox
 *
 * Create a single mailbox object to manage the mailbox protocol at the
 * cap_offset specified.
 *
 * RETURNS: created mailbox object on success
 *	    ERR_PTR(-errno) on failure
 */
static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
					    u16 cap_offset)
{
	struct pci_doe_mb *doe_mb;
	int rc;

	doe_mb = kzalloc(sizeof(*doe_mb), GFP_KERNEL);
	if (!doe_mb)
		return ERR_PTR(-ENOMEM);

	doe_mb->pdev = pdev;
	doe_mb->cap_offset = cap_offset;
	init_waitqueue_head(&doe_mb->wq);
	xa_init(&doe_mb->prots);

	doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
						dev_bus_name(&pdev->dev),
						pci_name(pdev),
						doe_mb->cap_offset);
	if (!doe_mb->work_queue) {
		pci_err(pdev, "[%x] failed to allocate work queue\n",
			doe_mb->cap_offset);
		rc = -ENOMEM;
		goto err_free;
	}

	/* Reset the mailbox by issuing an abort */
	rc = pci_doe_abort(doe_mb);
	if (rc) {
		pci_err(pdev, "[%x] failed to reset mailbox with abort command : %d\n",
			doe_mb->cap_offset, rc);
		goto err_destroy_wq;
	}

	/*
	 * The state machine and the mailbox should be in sync now;
	 * use the mailbox to query protocols.
	 */
	rc = pci_doe_cache_protocols(doe_mb);
	if (rc) {
		pci_err(pdev, "[%x] failed to cache protocols : %d\n",
			doe_mb->cap_offset, rc);
		goto err_cancel;
	}

	return doe_mb;

err_cancel:
	pci_doe_cancel_tasks(doe_mb);
	xa_destroy(&doe_mb->prots);
err_destroy_wq:
	destroy_workqueue(doe_mb->work_queue);
err_free:
	kfree(doe_mb);
	return ERR_PTR(rc);
}

/**
 * pci_doe_destroy_mb() - Destroy a DOE mailbox object
 *
 * @doe_mb: DOE mailbox
 *
 * Destroy all internal data structures created for the DOE mailbox.
 */
static void pci_doe_destroy_mb(struct pci_doe_mb *doe_mb)
{
	pci_doe_cancel_tasks(doe_mb);
	xa_destroy(&doe_mb->prots);
	destroy_workqueue(doe_mb->work_queue);
	kfree(doe_mb);
}

/**
 * pci_doe_supports_prot() - Return whether the DOE instance supports the
 *			     given protocol
 * @doe_mb: DOE mailbox capability to query
 * @vid: Protocol Vendor ID
 * @type: Protocol type
 *
 * RETURNS: True if the DOE mailbox supports the protocol specified
 */
static bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
{
	unsigned long index;
	void *entry;

	/* The discovery protocol must always be supported */
	if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY)
		return true;

	xa_for_each(&doe_mb->prots, index, entry)
		if (entry == pci_doe_xa_prot_entry(vid, type))
			return true;

	return false;
}

/**
 * pci_doe_submit_task() - Submit a task to be processed by the state machine
 *
 * @doe_mb: DOE mailbox capability to submit to
 * @task: task to be queued
 *
 * Submit a DOE task (request/response) to the DOE mailbox to be processed.
 * Returns upon queueing the task object.  If the queue is full, this function
 * will sleep until there is room in the queue.
 *
 * task->complete will be called when the state machine is done processing this
 * task.
 *
 * @task must be allocated on the stack.
 *
 * Excess data will be discarded.
 *
 * RETURNS: 0 when task has been successfully queued, -ERRNO on error
 */
static int pci_doe_submit_task(struct pci_doe_mb *doe_mb,
			       struct pci_doe_task *task)
{
	if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
		return -EINVAL;

	if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
		return -EIO;

	task->doe_mb = doe_mb;
	INIT_WORK_ONSTACK(&task->work, doe_statemachine_work);
	queue_work(doe_mb->work_queue, &task->work);
	return 0;
}

/**
 * pci_doe() - Perform Data Object Exchange
 *
 * @doe_mb: DOE Mailbox
 * @vendor: Vendor ID
 * @type: Data Object Type
 * @request: Request payload
 * @request_sz: Size of request payload (bytes)
 * @response: Response payload
 * @response_sz: Size of response payload (bytes)
 *
 * Submit @request to @doe_mb and store the @response.
 * The DOE exchange is performed synchronously and may therefore sleep.
 *
 * Payloads are treated as opaque byte streams which are transmitted verbatim,
 * without byte-swapping.  If payloads contain little-endian register values,
 * the caller is responsible for conversion with cpu_to_le32() / le32_to_cpu().
 *
 * For convenience, arbitrary payload sizes are allowed even though PCIe r6.0
 * sec 6.30.1 specifies the Data Object Header 2 "Length" in dwords.  The last
 * (partial) dword is copied with byte granularity and padded with zeroes if
 * necessary.  Callers are thus relieved of using dword-sized bounce buffers.
 *
 * RETURNS: Length of received response or negative errno.
 * Received data in excess of @response_sz is discarded.
 * The length may be smaller than @response_sz and the caller
 * is responsible for checking that.
 */
int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
	    const void *request, size_t request_sz,
	    void *response, size_t response_sz)
{
	DECLARE_COMPLETION_ONSTACK(c);
	struct pci_doe_task task = {
		.prot.vid = vendor,
		.prot.type = type,
		.request_pl = request,
		.request_pl_sz = request_sz,
		.response_pl = response,
		.response_pl_sz = response_sz,
		.complete = pci_doe_task_complete,
		.private = &c,
	};
	int rc;

	rc = pci_doe_submit_task(doe_mb, &task);
	if (rc)
		return rc;

	wait_for_completion(&c);

	return task.rv;
}
EXPORT_SYMBOL_GPL(pci_doe);
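
/*
 * Illustrative sketch of a pci_doe() call (not used by the code above): a
 * discovery exchange equivalent to what pci_doe_discovery() performs
 * internally.  The single-dword payloads are converted to and from little
 * endian by the caller, as documented for pci_doe():
 *
 *	__le32 req = cpu_to_le32(FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX, 0));
 *	__le32 rsp;
 *	int rc;
 *
 *	rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_PROTOCOL_DISCOVERY,
 *		     &req, sizeof(req), &rsp, sizeof(rsp));
 *	if (rc < 0)
 *		return rc;
 *	if (rc != sizeof(rsp))
 *		return -EIO;
 */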

/**
 * pci_find_doe_mailbox() - Find Data Object Exchange mailbox
 *
 * @pdev: PCI device
 * @vendor: Vendor ID
 * @type: Data Object Type
 *
 * Find the first DOE mailbox of a PCI device that supports the given protocol.
 *
 * RETURNS: Pointer to the DOE mailbox or NULL if none was found.
 */
struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor,
					u8 type)
{
	struct pci_doe_mb *doe_mb;
	unsigned long index;

	xa_for_each(&pdev->doe_mbs, index, doe_mb)
		if (pci_doe_supports_prot(doe_mb, vendor, type))
			return doe_mb;

	return NULL;
}
EXPORT_SYMBOL_GPL(pci_find_doe_mailbox);
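
/*
 * Illustrative sketch of locating a mailbox and performing an exchange from a
 * hypothetical caller (the 0x1234 vendor ID and data object type 0x42 below
 * are placeholders, not a real protocol):
 *
 *	struct pci_doe_mb *doe_mb;
 *	__le32 req[2] = {}, rsp[8];
 *	int len;
 *
 *	doe_mb = pci_find_doe_mailbox(pdev, 0x1234, 0x42);
 *	if (!doe_mb)
 *		return -ENODEV;
 *
 *	len = pci_doe(doe_mb, 0x1234, 0x42, req, sizeof(req), rsp, sizeof(rsp));
 *	if (len < 0)
 *		return len;
 *
 *	(len bytes of rsp are valid; this may be less than sizeof(rsp))
 */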

void pci_doe_init(struct pci_dev *pdev)
{
	struct pci_doe_mb *doe_mb;
	u16 offset = 0;
	int rc;

	xa_init(&pdev->doe_mbs);

	while ((offset = pci_find_next_ext_capability(pdev, offset,
						      PCI_EXT_CAP_ID_DOE))) {
		doe_mb = pci_doe_create_mb(pdev, offset);
		if (IS_ERR(doe_mb)) {
			pci_err(pdev, "[%x] failed to create mailbox: %ld\n",
				offset, PTR_ERR(doe_mb));
			continue;
		}

		rc = xa_insert(&pdev->doe_mbs, offset, doe_mb, GFP_KERNEL);
		if (rc) {
			pci_err(pdev, "[%x] failed to insert mailbox: %d\n",
				offset, rc);
			pci_doe_destroy_mb(doe_mb);
		}
	}
}

void pci_doe_destroy(struct pci_dev *pdev)
{
	struct pci_doe_mb *doe_mb;
	unsigned long index;

	xa_for_each(&pdev->doe_mbs, index, doe_mb)
		pci_doe_destroy_mb(doe_mb);

	xa_destroy(&pdev->doe_mbs);
}

void pci_doe_disconnected(struct pci_dev *pdev)
{
	struct pci_doe_mb *doe_mb;
	unsigned long index;

	xa_for_each(&pdev->doe_mbs, index, doe_mb)
		pci_doe_cancel_tasks(doe_mb);
}