// SPDX-License-Identifier: GPL-2.0
/*
 * Data Object Exchange
 *	PCIe r6.0, sec 6.30 DOE
 *
 * Copyright (C) 2021 Huawei
 *	Jonathan Cameron <Jonathan.Cameron@huawei.com>
 *
 * Copyright (C) 2022 Intel Corporation
 *	Ira Weiny <ira.weiny@intel.com>
 */

#define dev_fmt(fmt) "DOE: " fmt

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/workqueue.h>

#define PCI_DOE_PROTOCOL_DISCOVERY 0

/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
#define PCI_DOE_TIMEOUT HZ
#define PCI_DOE_POLL_INTERVAL	(PCI_DOE_TIMEOUT / 128)

#define PCI_DOE_FLAG_CANCEL	0
#define PCI_DOE_FLAG_DEAD	1

/**
 * struct pci_doe_mb - State for a single DOE mailbox
 *
 * This state is used to manage a single DOE mailbox capability.  All fields
 * should be considered opaque to the consumers and the structure passed into
 * the helpers below after being created by pcim_doe_create_mb().
 *
 * @pdev: PCI device this mailbox belongs to
 * @cap_offset: Capability offset
 * @prots: Array of protocols supported (encoded as long values)
 * @wq: Wait queue for work item
 * @work_queue: Queue of pci_doe_work items
 * @flags: Bit array of PCI_DOE_FLAG_* flags
 */
struct pci_doe_mb {
	struct pci_dev *pdev;
	u16 cap_offset;
	struct xarray prots;

	wait_queue_head_t wq;
	struct workqueue_struct *work_queue;
	unsigned long flags;
};

static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
{
	if (wait_event_timeout(doe_mb->wq,
			       test_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags),
			       timeout))
		return -EIO;
	return 0;
}

static void pci_doe_write_ctrl(struct pci_doe_mb *doe_mb, u32 val)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;

	pci_write_config_dword(pdev, offset + PCI_DOE_CTRL, val);
}

static int pci_doe_abort(struct pci_doe_mb *doe_mb)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	unsigned long timeout_jiffies;

	pci_dbg(pdev, "[%x] Issuing Abort\n", offset);

	timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
	pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_ABORT);

	do {
		int rc;
		u32 val;

		rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
		if (rc)
			return rc;
		pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);

		/* Abort success! */
		if (!FIELD_GET(PCI_DOE_STATUS_ERROR, val) &&
		    !FIELD_GET(PCI_DOE_STATUS_BUSY, val))
			return 0;

	} while (!time_after(jiffies, timeout_jiffies));

	/* Abort has timed out and the MB is dead */
	pci_err(pdev, "[%x] ABORT timed out\n", offset);
	return -EIO;
}

static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
			    struct pci_doe_task *task)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	u32 val;
	int i;

	/*
	 * Check the DOE busy bit is not set. If it is set, this could indicate
	 * someone other than Linux (e.g. firmware) is using the mailbox. Note
	 * it is expected that firmware and OS will negotiate access rights via
	 * an, as yet to be defined, method.
	 */
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_BUSY, val))
		return -EBUSY;

	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
		return -EIO;

	/* Write DOE Header */
	val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
		FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
	/* Length is 2 DW of header + length of payload in DW */
	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
			       FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
					  2 + task->request_pl_sz /
						sizeof(u32)));
	for (i = 0; i < task->request_pl_sz / sizeof(u32); i++)
		pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
				       task->request_pl[i]);

	pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);

	return 0;
}
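
/*
 * Worked example for the send path above (illustrative only): a task with a
 * 12 byte request_pl_sz is written as 3 payload dwords, and the header
 * length field is encoded as 2 + 12 / sizeof(u32) == 5 dwords, i.e. the
 * size of the whole object including the two header dwords.
 */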

static bool pci_doe_data_obj_ready(struct pci_doe_mb *doe_mb)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	u32 val;

	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val))
		return true;
	return false;
}

static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	size_t length, payload_length;
	u32 val;
	int i;

	/* Read the first dword to get the protocol */
	pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
	if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) ||
	    (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) {
		dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n",
				    doe_mb->cap_offset, task->prot.vid, task->prot.type,
				    FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val),
				    FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val));
		return -EIO;
	}

	pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
	/* Read the second dword to get the length */
	pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
	pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);

	length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val);
	if (length > SZ_1M || length < 2)
		return -EIO;

	/* First 2 dwords have already been read */
	length -= 2;
	payload_length = min(length, task->response_pl_sz / sizeof(u32));
	/* Read the rest of the response payload */
	for (i = 0; i < payload_length; i++) {
		pci_read_config_dword(pdev, offset + PCI_DOE_READ,
				      &task->response_pl[i]);
		/* Prior to the last ack, ensure Data Object Ready */
		if (i == (payload_length - 1) && !pci_doe_data_obj_ready(doe_mb))
			return -EIO;
		pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
	}

	/* Flush excess length */
	for (; i < length; i++) {
		pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
		pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
	}

	/* Final error check to pick up on any error reported since Data Object Ready */
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
		return -EIO;

	return min(length, task->response_pl_sz / sizeof(u32)) * sizeof(u32);
}
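
/*
 * Worked example for the receive path above (illustrative only): if the
 * device reports a header length of 10 DW and the caller supplied a 16 byte
 * (4 DW) response buffer, the two header dwords are consumed first, 4 of the
 * remaining 8 payload dwords are copied into response_pl, the other 4 are
 * read and discarded, and pci_doe_recv_resp() returns 16 bytes.
 */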

static void signal_task_complete(struct pci_doe_task *task, int rv)
{
	task->rv = rv;
	task->complete(task);
}

static void signal_task_abort(struct pci_doe_task *task, int rv)
{
	struct pci_doe_mb *doe_mb = task->doe_mb;
	struct pci_dev *pdev = doe_mb->pdev;

	if (pci_doe_abort(doe_mb)) {
		/*
		 * If the device can't process an abort, mark the mailbox
		 * dead: no more submissions will be accepted.
		 */
		pci_err(pdev, "[%x] Abort failed, marking mailbox dead\n",
			doe_mb->cap_offset);
		set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
	}
	signal_task_complete(task, rv);
}

static void doe_statemachine_work(struct work_struct *work)
{
	struct pci_doe_task *task = container_of(work, struct pci_doe_task,
						 work);
	struct pci_doe_mb *doe_mb = task->doe_mb;
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	unsigned long timeout_jiffies;
	u32 val;
	int rc;

	if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) {
		signal_task_complete(task, -EIO);
		return;
	}

	/* Send request */
	rc = pci_doe_send_req(doe_mb, task);
	if (rc) {
		/*
		 * The specification does not provide any guidance on how to
		 * resolve conflicting requests from other entities.
		 * Furthermore, it is likely that busy will not be detected
		 * most of the time.  Flag any detection of status busy with
		 * an error.
		 */
		if (rc == -EBUSY)
			dev_err_ratelimited(&pdev->dev, "[%x] busy detected; another entity is sending conflicting requests\n",
					    offset);
		signal_task_abort(task, rc);
		return;
	}

	timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
	/* Poll for response */
retry_resp:
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) {
		signal_task_abort(task, -EIO);
		return;
	}

	if (!FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) {
		if (time_after(jiffies, timeout_jiffies)) {
			signal_task_abort(task, -EIO);
			return;
		}
		rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
		if (rc) {
			signal_task_abort(task, rc);
			return;
		}
		goto retry_resp;
	}

	rc = pci_doe_recv_resp(doe_mb, task);
	if (rc < 0) {
		signal_task_abort(task, rc);
		return;
	}

	signal_task_complete(task, rc);
}

static void pci_doe_task_complete(struct pci_doe_task *task)
{
	complete(task->private);
}

static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
			     u8 *protocol)
{
	u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
				    *index);
	u32 response_pl;
	DECLARE_COMPLETION_ONSTACK(c);
	struct pci_doe_task task = {
		.prot.vid = PCI_VENDOR_ID_PCI_SIG,
		.prot.type = PCI_DOE_PROTOCOL_DISCOVERY,
		.request_pl = &request_pl,
		.request_pl_sz = sizeof(request_pl),
		.response_pl = &response_pl,
		.response_pl_sz = sizeof(response_pl),
		.complete = pci_doe_task_complete,
		.private = &c,
	};
	int rc;

	rc = pci_doe_submit_task(doe_mb, &task);
	if (rc < 0)
		return rc;

	wait_for_completion(&c);

	if (task.rv != sizeof(response_pl))
		return -EIO;

	*vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
	*protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
			      response_pl);
	*index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX,
			   response_pl);

	return 0;
}

static void *pci_doe_xa_prot_entry(u16 vid, u8 prot)
{
	return xa_mk_value((vid << 8) | prot);
}
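
/*
 * Encoding example (illustrative only): the PCI-SIG discovery protocol,
 * vid == PCI_VENDOR_ID_PCI_SIG (0x0001) and prot == 0x00, is stored as the
 * xarray value (0x0001 << 8) | 0x00 == 0x100.
 */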

static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
{
	u8 index = 0;
	u8 xa_idx = 0;

	do {
		int rc;
		u16 vid;
		u8 prot;

		rc = pci_doe_discovery(doe_mb, &index, &vid, &prot);
		if (rc)
			return rc;

		pci_dbg(doe_mb->pdev,
			"[%x] Found protocol %d vid: %x prot: %x\n",
			doe_mb->cap_offset, xa_idx, vid, prot);

		rc = xa_insert(&doe_mb->prots, xa_idx++,
			       pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL);
		if (rc)
			return rc;
	} while (index);

	return 0;
}

static void pci_doe_xa_destroy(void *mb)
{
	struct pci_doe_mb *doe_mb = mb;

	xa_destroy(&doe_mb->prots);
}

static void pci_doe_destroy_workqueue(void *mb)
{
	struct pci_doe_mb *doe_mb = mb;

	destroy_workqueue(doe_mb->work_queue);
}

static void pci_doe_flush_mb(void *mb)
{
	struct pci_doe_mb *doe_mb = mb;

	/* Stop all pending work items from starting */
	set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);

	/* Cancel an in progress work item, if necessary */
	set_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags);
	wake_up(&doe_mb->wq);

	/* Flush all work items */
	flush_workqueue(doe_mb->work_queue);
}
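
/*
 * Usage sketch (illustrative only, not compiled here): a driver typically
 * walks the DOE extended capabilities of a device and creates one mailbox
 * per instance.  pci_find_next_ext_capability() and PCI_EXT_CAP_ID_DOE are
 * assumed to be available to the caller.
 *
 *	u16 off = 0;
 *
 *	while ((off = pci_find_next_ext_capability(pdev, off,
 *						   PCI_EXT_CAP_ID_DOE))) {
 *		struct pci_doe_mb *doe_mb = pcim_doe_create_mb(pdev, off);
 *
 *		if (IS_ERR(doe_mb))
 *			continue;
 *		// remember doe_mb for later pci_doe_submit_task() calls
 *	}
 */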

/**
 * pcim_doe_create_mb() - Create a DOE mailbox object
 *
 * @pdev: PCI device to create the DOE mailbox for
 * @cap_offset: Offset of the DOE mailbox
 *
 * Create a single mailbox object to manage the mailbox protocol at the
 * cap_offset specified.
 *
 * RETURNS: created mailbox object on success
 *	    ERR_PTR(-errno) on failure
 */
struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset)
{
	struct pci_doe_mb *doe_mb;
	struct device *dev = &pdev->dev;
	int rc;

	doe_mb = devm_kzalloc(dev, sizeof(*doe_mb), GFP_KERNEL);
	if (!doe_mb)
		return ERR_PTR(-ENOMEM);

	doe_mb->pdev = pdev;
	doe_mb->cap_offset = cap_offset;
	init_waitqueue_head(&doe_mb->wq);

	xa_init(&doe_mb->prots);
	rc = devm_add_action(dev, pci_doe_xa_destroy, doe_mb);
	if (rc)
		return ERR_PTR(rc);

	doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
						     dev_driver_string(&pdev->dev),
						     pci_name(pdev),
						     doe_mb->cap_offset);
	if (!doe_mb->work_queue) {
		pci_err(pdev, "[%x] failed to allocate work queue\n",
			doe_mb->cap_offset);
		return ERR_PTR(-ENOMEM);
	}
	rc = devm_add_action_or_reset(dev, pci_doe_destroy_workqueue, doe_mb);
	if (rc)
		return ERR_PTR(rc);

	/* Reset the mailbox by issuing an abort */
	rc = pci_doe_abort(doe_mb);
	if (rc) {
		pci_err(pdev, "[%x] failed to reset mailbox with abort command: %d\n",
			doe_mb->cap_offset, rc);
		return ERR_PTR(rc);
	}

	/*
	 * The state machine and the mailbox should be in sync now;
	 * set up the mailbox flush prior to using the mailbox to query
	 * protocols.
	 */
	rc = devm_add_action_or_reset(dev, pci_doe_flush_mb, doe_mb);
	if (rc)
		return ERR_PTR(rc);

	rc = pci_doe_cache_protocols(doe_mb);
	if (rc) {
		pci_err(pdev, "[%x] failed to cache protocols: %d\n",
			doe_mb->cap_offset, rc);
		return ERR_PTR(rc);
	}

	return doe_mb;
}
EXPORT_SYMBOL_GPL(pcim_doe_create_mb);

/**
 * pci_doe_supports_prot() - Return if the DOE instance supports the given
 *			     protocol
 * @doe_mb: DOE mailbox capability to query
 * @vid: Protocol Vendor ID
 * @type: Protocol type
 *
 * RETURNS: True if the DOE mailbox supports the protocol specified
 */
bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
{
	unsigned long index;
	void *entry;

	/* The discovery protocol must always be supported */
	if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY)
		return true;

	xa_for_each(&doe_mb->prots, index, entry)
		if (entry == pci_doe_xa_prot_entry(vid, type))
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(pci_doe_supports_prot);

/**
 * pci_doe_submit_task() - Submit a task to be processed by the state machine
 *
 * @doe_mb: DOE mailbox capability to submit to
 * @task: task to be queued
 *
 * Submit a DOE task (request/response) to the DOE mailbox to be processed.
 * Returns upon queueing the task object.  If the queue is full this function
 * will sleep until there is room in the queue.
 *
 * task->complete will be called when the state machine is done processing
 * this task.
 *
 * Excess data will be discarded.
 *
 * RETURNS: 0 when task has been successfully queued, -ERRNO on error
 */
int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
{
	if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
		return -EINVAL;

	/*
	 * DOE requests must be a whole number of DW and the response needs to
	 * be big enough for at least 1 DW
	 */
	if (task->request_pl_sz % sizeof(u32) ||
	    task->response_pl_sz < sizeof(u32))
		return -EINVAL;

	if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
		return -EIO;

	task->doe_mb = doe_mb;
	INIT_WORK(&task->work, doe_statemachine_work);
	queue_work(doe_mb->work_queue, &task->work);
	return 0;
}
EXPORT_SYMBOL_GPL(pci_doe_submit_task);
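
/*
 * Usage sketch (illustrative only, not compiled here): a caller that needs a
 * synchronous request/response exchange can pair pci_doe_submit_task() with
 * an on-stack completion, exactly as pci_doe_discovery() does above.  "vid",
 * "type", "req" and "rsp" below are placeholders supplied by the caller.
 *
 *	static void my_doe_task_complete(struct pci_doe_task *task)
 *	{
 *		complete(task->private);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(c);
 *	struct pci_doe_task task = {
 *		.prot.vid = vid,
 *		.prot.type = type,
 *		.request_pl = req,
 *		.request_pl_sz = sizeof(req),
 *		.response_pl = rsp,
 *		.response_pl_sz = sizeof(rsp),
 *		.complete = my_doe_task_complete,
 *		.private = &c,
 *	};
 *	int rc = pci_doe_submit_task(doe_mb, &task);
 *
 *	if (rc < 0)
 *		return rc;
 *	wait_for_completion(&c);
 *	// task.rv now holds the response length in bytes, or a negative errno
 */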