// SPDX-License-Identifier: GPL-2.0+
/*
 * Adjunct processor matrix VFIO device driver callbacks.
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
 *	      Halil Pasic <pasic@linux.ibm.com>
 *	      Pierre Morel <pmorel@linux.ibm.com>
 */
#include <linux/string.h>
#include <linux/vfio.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/uuid.h>
#include <asm/kvm.h>
#include <asm/zcrypt.h>

#include "vfio_ap_private.h"
#include "vfio_ap_debug.h"

#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"

#define AP_QUEUE_ASSIGNED "assigned"
#define AP_QUEUE_UNASSIGNED "unassigned"
#define AP_QUEUE_IN_USE "in use"

#define AP_RESET_INTERVAL	20	/* Reset sleep interval (20ms) */

static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
static int vfio_ap_mdev_reset_qlist(struct list_head *qlist);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);

/**
 * get_update_locks_for_kvm: Acquire the locks required to dynamically update a
 *			     KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. kvm->lock:	       required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @kvm is NULL, the KVM lock will not be taken.
 */
static inline void get_update_locks_for_kvm(struct kvm *kvm)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (kvm)
		mutex_lock(&kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * release_update_locks_for_kvm: Release the locks used to dynamically update a
 *				 KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @kvm is NULL, the KVM lock will not be released.
 */
static inline void release_update_locks_for_kvm(struct kvm *kvm)
{
	mutex_unlock(&matrix_dev->mdevs_lock);
	if (kvm)
		mutex_unlock(&kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);
}
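
/*
 * Illustrative usage of the helpers above (a sketch, not an actual call site
 * in this file): every dynamic update of a guest's APCB is bracketed by one
 * of the get/release pairs:
 *
 *	get_update_locks_for_kvm(kvm);
 *	... update the shadow APCB and push it to the guest ...
 *	release_update_locks_for_kvm(kvm);
 */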

/**
 * get_update_locks_for_mdev: Acquire the locks required to dynamically update a
 *			      KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *		 configuration data to use to update a KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *	 lock will not be taken.
 */
static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (matrix_mdev && matrix_mdev->kvm)
		mutex_lock(&matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * release_update_locks_for_mdev: Release the locks used to dynamically update a
 *				  KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *		 configuration data to use to update a KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. matrix_mdev->kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *	 lock will not be released.
 */
static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	mutex_unlock(&matrix_dev->mdevs_lock);
	if (matrix_mdev && matrix_mdev->kvm)
		mutex_unlock(&matrix_mdev->kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);
}

/**
 * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and
 *			     acquire the locks required to update the APCB of
 *			     the KVM guest to which the mdev is attached.
 *
 * @apqn: the APQN of a queue device.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock
 *	 will not be taken.
 *
 * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn
 *	   is not assigned to an ap_matrix_mdev.
 */
static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn)
{
	struct ap_matrix_mdev *matrix_mdev;

	mutex_lock(&matrix_dev->guests_lock);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) &&
		    test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) {
			if (matrix_mdev->kvm)
				mutex_lock(&matrix_mdev->kvm->lock);

			mutex_lock(&matrix_dev->mdevs_lock);

			return matrix_mdev;
		}
	}

	mutex_lock(&matrix_dev->mdevs_lock);

	return NULL;
}

/**
 * get_update_locks_for_queue: get the locks required to update the APCB of the
 *			       KVM guest to which the matrix mdev linked to a
 *			       vfio_ap_queue object is attached.
 *
 * @q: a pointer to a vfio_ap_queue object.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock:	  required to use the KVM pointer to update a
 *				  KVM guest's APCB.
 * 2. q->matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:	  required to access data stored in a
 *				  matrix_mdev
 *
 * Note: if @q is not linked to an ap_matrix_mdev object, the KVM lock will
 *	 not be taken.
 */
static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (q->matrix_mdev && q->matrix_mdev->kvm)
		mutex_lock(&q->matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
 *			    hash table of queues assigned to a matrix mdev
 * @matrix_mdev: the matrix mdev
 * @apqn: The APQN of a queue device
 *
 * Return: the pointer to the vfio_ap_queue struct representing the queue or
 *	   NULL if the queue is not assigned to @matrix_mdev
 */
static struct vfio_ap_queue *vfio_ap_mdev_get_queue(
					struct ap_matrix_mdev *matrix_mdev,
					int apqn)
{
	struct vfio_ap_queue *q;

	hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
			       apqn) {
		if (q && q->apqn == apqn)
			return q;
	}

	return NULL;
}

/**
 * vfio_ap_wait_for_irqclear - clears the IR bit or gives up after 5 tries
 * @apqn: The AP Queue number
 *
 * Checks the IRQ bit for the status of this APQN using ap_tapq.
 * Returns if the ap_tapq function succeeded and the bit is clear.
 * Returns if the ap_tapq function failed with an invalid, deconfigured or
 * checkstopped AP.
 * Otherwise retries up to 5 times after waiting 20ms.
 */
static void vfio_ap_wait_for_irqclear(int apqn)
{
	struct ap_queue_status status;
	int retry = 5;

	do {
		status = ap_tapq(apqn, NULL);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			if (!status.irq_enabled)
				return;
			fallthrough;
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		default:
			WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
				  status.response_code, apqn);
			return;
		}
	} while (--retry);

	WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
		  __func__, status.response_code, apqn);
}

/**
 * vfio_ap_free_aqic_resources - free vfio_ap_queue resources
 * @q: The vfio_ap_queue
 *
 * Unregisters the ISC in the GIB when the saved ISC is not invalid.
 * Unpins the guest's page holding the NIB when it exists.
 * Resets the saved_iova and saved_isc to invalid values.
 */
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
	if (!q)
		return;
	if (q->saved_isc != VFIO_AP_ISC_INVALID &&
	    !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
		kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
		q->saved_isc = VFIO_AP_ISC_INVALID;
	}
	if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
		vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
		q->saved_iova = 0;
	}
}

/**
 * vfio_ap_irq_disable - disables and clears an ap_queue interrupt
 * @q: The vfio_ap_queue
 *
 * Uses ap_aqic to disable the interruption and, in case of success, a reset
 * in progress or an IRQ disable command having already proceeded, calls
 * vfio_ap_wait_for_irqclear() to check for the IRQ bit to be clear
 * and calls vfio_ap_free_aqic_resources() to free the resources associated
 * with the AP interrupt handling.
 *
 * In the case the AP is busy, or a reset is in progress,
 * retries after 20ms, up to 5 times.
 *
 * Returns if the ap_aqic function failed with an invalid, deconfigured or
 * checkstopped AP.
 *
 * Return: &struct ap_queue_status
 */
static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
	union ap_qirq_ctrl aqic_gisa = { .value = 0 };
	struct ap_queue_status status;
	int retries = 5;

	do {
		status = ap_aqic(q->apqn, aqic_gisa, 0);
		switch (status.response_code) {
		case AP_RESPONSE_OTHERWISE_CHANGED:
		case AP_RESPONSE_NORMAL:
			vfio_ap_wait_for_irqclear(q->apqn);
			goto end_free;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
		default:
			/* All cases in default means AP not operational */
			WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
				  status.response_code);
			goto end_free;
		}
	} while (retries--);

	WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
		  status.response_code);
end_free:
	vfio_ap_free_aqic_resources(q);
	return status;
}

/**
 * vfio_ap_validate_nib - validate a notification indicator byte (nib) address.
 *
 * @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction.
 * @nib: the location for storing the nib address.
 *
 * When the PQAP(AQIC) instruction is executed, general register 2 contains the
 * address of the notification indicator byte (nib) used for IRQ notification.
 * This function parses and validates the nib from gr2.
 *
 * Return: returns zero if the nib address is valid; otherwise, returns
 *	   -EINVAL.
 */
static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
{
	*nib = vcpu->run->s.regs.gprs[2];

	if (!*nib)
		return -EINVAL;
	if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
		return -EINVAL;

	return 0;
}
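
/*
 * Illustrative example for the validation above (assuming the usual 4K
 * PAGE_SHIFT): a guest NIB address of 0x12345008 in GR2 asks gfn_to_hva()
 * to translate guest frame 0x12345; the page offset (0x008) is preserved
 * later when the host NIB address (h_nib) is computed.
 */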

static int ensure_nib_shared(unsigned long addr, struct gmap *gmap)
{
	int ret;

	/*
	 * The nib has to be located in shared storage since guest and
	 * host access it. vfio_pin_pages() will do a pin shared and
	 * if that fails (possibly because it's not a shared page) it
	 * calls export. We try to do a second pin shared here so that
	 * the UV gives us an error code if we try to pin a non-shared
	 * page.
	 *
	 * If the page is already pinned shared the UV will return a success.
	 */
	ret = uv_pin_shared(addr);
	if (ret) {
		/* vfio_pin_pages() likely exported the page so let's re-import */
		gmap_convert_to_secure(gmap, addr);
	}
	return ret;
}

/**
 * vfio_ap_irq_enable - Enable Interruption for an APQN
 *
 * @q:	  the vfio_ap_queue holding AQIC parameters
 * @isc:  the guest ISC to register with the GIB interface
 * @vcpu: the vcpu object containing the registers specifying the parameters
 *	  passed to the PQAP(AQIC) instruction.
 *
 * Pin the NIB saved in *q
 * Register the guest ISC to GIB interface and retrieve the
 * host ISC to issue the host side PQAP/AQIC
 *
 * status.response_code may be set to AP_RESPONSE_INVALID_ADDRESS in case
 * vfio_pin_pages() or kvm_s390_gisc_register() failed.
 *
 * Otherwise return the ap_queue_status returned by the ap_aqic();
 * all retry handling will be done by the guest.
 *
 * Return: &struct ap_queue_status
 */
static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
						 int isc,
						 struct kvm_vcpu *vcpu)
{
	union ap_qirq_ctrl aqic_gisa = { .value = 0 };
	struct ap_queue_status status = {};
	struct kvm_s390_gisa *gisa;
	struct page *h_page;
	int nisc;
	struct kvm *kvm;
	phys_addr_t h_nib;
	dma_addr_t nib;
	int ret;

	/* Verify that the notification indicator byte address is valid */
	if (vfio_ap_validate_nib(vcpu, &nib)) {
		VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%pad, apqn=%#04x\n",
				 __func__, &nib, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
			     IOMMU_READ | IOMMU_WRITE, &h_page);
	switch (ret) {
	case 1:
		break;
	default:
		VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d, "
				 "nib=%pad, apqn=%#04x\n",
				 __func__, ret, &nib, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	kvm = q->matrix_mdev->kvm;
	gisa = kvm->arch.gisa_int.origin;

	h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
	aqic_gisa.gisc = isc;

	/* NIB in non-shared storage is a rc 6 for PV guests */
	if (kvm_s390_pv_cpu_is_protected(vcpu) &&
	    ensure_nib_shared(h_nib & PAGE_MASK, kvm->arch.gmap)) {
		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	nisc = kvm_s390_gisc_register(kvm, isc);
	if (nisc < 0) {
		VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
				 __func__, nisc, isc, q->apqn);

		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	aqic_gisa.isc = nisc;
	aqic_gisa.ir = 1;
	aqic_gisa.gisa = virt_to_phys(gisa) >> 4;

	status = ap_aqic(q->apqn, aqic_gisa, h_nib);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* See if we did clear older IRQ configuration */
		vfio_ap_free_aqic_resources(q);
		q->saved_iova = nib;
		q->saved_isc = isc;
		break;
	case AP_RESPONSE_OTHERWISE_CHANGED:
		/* We could not modify IRQ settings: clear new configuration */
		ret = kvm_s390_gisc_unregister(kvm, isc);
		if (ret)
			VFIO_AP_DBF_WARN("%s: kvm_s390_gisc_unregister: rc=%d isc=%d, apqn=%#04x\n",
					 __func__, ret, isc, q->apqn);
		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		break;
	default:
		pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
			status.response_code);
		vfio_ap_irq_disable(q);
		break;
	}

	if (status.response_code != AP_RESPONSE_NORMAL) {
		VFIO_AP_DBF_WARN("%s: PQAP(AQIC) failed with status=%#02x: "
				 "zone=%#x, ir=%#x, gisc=%#x, f=%#x, "
				 "gisa=%#x, isc=%#x, apqn=%#04x\n",
				 __func__, status.response_code,
				 aqic_gisa.zone, aqic_gisa.ir, aqic_gisa.gisc,
				 aqic_gisa.gf, aqic_gisa.gisa, aqic_gisa.isc,
				 q->apqn);
	}

	return status;
}

/**
 * vfio_ap_le_guid_to_be_uuid - convert a little endian guid array into an array
 *				of big endian elements that can be passed by
 *				value to an s390dbf sprintf event function to
 *				format a UUID string.
 *
 * @guid: the object containing the little endian guid
 * @uuid: a six-element array of long values that can be passed by value as
 *	  arguments for a formatting string specifying a UUID.
 *
 * The S390 Debug Feature (s390dbf) allows the use of "%s" in the sprintf
 * event functions only if the memory for the passed string is available as
 * long as the debug feature exists. Since a mediated device can be removed at
 * any time, its name cannot be used because %s passes the reference to the
 * string in memory and the reference will go stale once the device is removed.
 *
 * The s390dbf string formatting function allows a maximum of 9 arguments for a
 * message to be displayed in the 'sprintf' view. In order to use the bytes
 * comprising the mediated device's UUID to display the mediated device name,
 * they will have to be converted into an array whose elements can be passed by
 * value to sprintf. For example:
 *
 * guid array: { 83, 78, 17, 62, bb, f1, f0, 47, 91, 4d, 32, a2, 2e, 3a, 88, 04 }
 * mdev name: 62177883-f1bb-47f0-914d-32a22e3a8804
 * array returned: { 62177883, f1bb, 47f0, 914d, 32a2, 2e3a8804 }
 * formatting string: "%08lx-%04lx-%04lx-%04lx-%04lx%08lx"
 */
static void vfio_ap_le_guid_to_be_uuid(guid_t *guid, unsigned long *uuid)
{
	/*
	 * The input guid is ordered in little endian, so it needs to be
	 * reordered for displaying a UUID as a string. This specifies the
	 * guid indices in proper order.
	 */
	uuid[0] = le32_to_cpup((__le32 *)guid);
	uuid[1] = le16_to_cpup((__le16 *)&guid->b[4]);
	uuid[2] = le16_to_cpup((__le16 *)&guid->b[6]);
	uuid[3] = *((__u16 *)&guid->b[8]);
	uuid[4] = *((__u16 *)&guid->b[10]);
	uuid[5] = *((__u32 *)&guid->b[12]);
}

/**
 * handle_pqap - PQAP instruction callback
 *
 * @vcpu: The vcpu on which we received the PQAP instruction
 *
 * Get the general register contents to initialize internal variables.
 * REG[0]: APQN
 * REG[1]: IR and ISC
 * REG[2]: NIB
 *
 * Response.status may be set to one of the following response codes:
 * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
 * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
 * - AP_RESPONSE_NORMAL (0): in case of success
 *   See vfio_ap_irq_enable() and vfio_ap_irq_disable() for other possible
 *   response codes.
 * We take the matrix_dev->mdevs_lock to ensure serialization on queues and
 * mediated device access.
 *
 * Return: 0 if we could handle the request inside KVM.
 * Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	uint64_t status;
	uint16_t apqn;
	unsigned long uuid[6];
	struct vfio_ap_queue *q;
	struct ap_queue_status qstatus = {
			       .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
	struct ap_matrix_mdev *matrix_mdev;

	apqn = vcpu->run->s.regs.gprs[0] & 0xffff;

	/* If we do not use the AIV facility just go to userland */
	if (!(vcpu->arch.sie_block->eca & ECA_AIV)) {
		VFIO_AP_DBF_WARN("%s: AIV facility not installed: apqn=0x%04x, eca=0x%04x\n",
				 __func__, apqn, vcpu->arch.sie_block->eca);

		return -EOPNOTSUPP;
	}

	mutex_lock(&matrix_dev->mdevs_lock);

	if (!vcpu->kvm->arch.crypto.pqap_hook) {
		VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n",
				 __func__, apqn);

		goto out_unlock;
	}

	matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
				   struct ap_matrix_mdev, pqap_hook);

	/* If there is no guest using the mdev, there is nothing to do */
	if (!matrix_mdev->kvm) {
		vfio_ap_le_guid_to_be_uuid(&matrix_mdev->mdev->uuid, uuid);
		VFIO_AP_DBF_WARN("%s: mdev %08lx-%04lx-%04lx-%04lx-%04lx%08lx not in use: apqn=0x%04x\n",
				 __func__, uuid[0], uuid[1], uuid[2],
				 uuid[3], uuid[4], uuid[5], apqn);
		goto out_unlock;
	}

	q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
	if (!q) {
		VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n",
				 __func__, AP_QID_CARD(apqn),
				 AP_QID_QUEUE(apqn));
		goto out_unlock;
	}

	status = vcpu->run->s.regs.gprs[1];
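
	/*
	 * Illustrative note (not from the original source): the IR test below
	 * follows the s390 convention of numbering bits from the MSB (bit 0)
	 * down to the LSB (bit 63), so bit 16 of the 64-bit GR1 is extracted
	 * by shifting right (63 - 16) positions. For example, GR1 =
	 * 0x0000800000000003 has the IR bit set and requests ISC 3.
	 */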
	/* If IR bit(16) is set we enable the interrupt */
	if ((status >> (63 - 16)) & 0x01)
		qstatus = vfio_ap_irq_enable(q, status & 0x07, vcpu);
	else
		qstatus = vfio_ap_irq_disable(q);

out_unlock:
	memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
	vcpu->run->s.regs.gprs[1] >>= 32;
	mutex_unlock(&matrix_dev->mdevs_lock);
	return 0;
}

static void vfio_ap_matrix_init(struct ap_config_info *info,
				struct ap_matrix *matrix)
{
	matrix->apm_max = info->apxa ? info->na : 63;
	matrix->aqm_max = info->apxa ? info->nd : 15;
	matrix->adm_max = info->apxa ? info->nd : 15;
}

static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
{
	if (matrix_mdev->kvm)
		kvm_arch_crypto_set_masks(matrix_mdev->kvm,
					  matrix_mdev->shadow_apcb.apm,
					  matrix_mdev->shadow_apcb.aqm,
					  matrix_mdev->shadow_apcb.adm);
}

static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
{
	DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS);

	bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS);
	bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm,
		   (unsigned long *)matrix_dev->info.adm, AP_DOMAINS);

	return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm,
			     AP_DOMAINS);
}

/*
 * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
 *				to ensure no queue devices are passed through to
 *				the guest that are not bound to the vfio_ap
 *				device driver.
 *
 * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
 * @apm_filtered: a 256-bit bitmap for storing the APIDs filtered from the
 *		  guest's AP configuration that are still in the host's AP
 *		  configuration.
 *
 * Note: If an APQN references a queue device that is not bound to the vfio_ap
 *	 driver, its APID will be filtered from the guest's APCB. The matrix
 *	 structure precludes filtering an individual APQN, so its APID will be
 *	 filtered. Consequently, all queues associated with the adapter that
 *	 are in the host's AP configuration must be reset. If queues are
 *	 subsequently made available again to the guest, they should re-appear
 *	 in a reset state.
 *
 * Return: a boolean value indicating whether the KVM guest's APCB was changed
 *	   by the filtering or not.
 */
static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
				       unsigned long *apm_filtered)
{
	unsigned long apid, apqi, apqn;
	DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
	DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);
	struct vfio_ap_queue *q;

	bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
	bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
	bitmap_clear(apm_filtered, 0, AP_DEVICES);

	/*
	 * Copy the adapters, domains and control domains to the shadow_apcb
	 * from the matrix mdev, but only those that are assigned to the host's
	 * AP configuration.
	 */
	bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm,
		   (unsigned long *)matrix_dev->info.apm, AP_DEVICES);
	bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
		   (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);

	for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) {
		for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm,
				     AP_DOMAINS) {
			/*
			 * If the APQN is not bound to the vfio_ap device
			 * driver, then we can't assign it to the guest's
			 * AP configuration. The AP architecture won't
			 * allow filtering of a single APQN, so let's filter
			 * the APID since an adapter represents a physical
			 * hardware device.
			 */
			apqn = AP_MKQID(apid, apqi);
			q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
			if (!q || q->reset_status.response_code) {
				clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);

				/*
				 * If the adapter was previously plugged into
				 * the guest, let's let the caller know that
				 * the APID was filtered.
				 */
				if (test_bit_inv(apid, prev_shadow_apm))
					set_bit_inv(apid, apm_filtered);

				break;
			}
		}
	}

	return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm,
			     AP_DEVICES) ||
	       !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm,
			     AP_DOMAINS);
}
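
/*
 * Worked example for the filtering above (illustrative values): suppose the
 * mdev is assigned APM { 05 } and AQM { 0004, 00ab }, and queue 05.00ab is
 * not bound to the vfio_ap driver. Filtering clears APID 05 from the shadow
 * APCB, so neither 05.0004 nor 05.00ab is passed through; if adapter 05 was
 * previously plugged into the guest, it is also reported in @apm_filtered so
 * that its remaining bound queues can be reset by the caller.
 */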

static int vfio_ap_mdev_init_dev(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	matrix_mdev->mdev = to_mdev_device(vdev->dev);
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
	matrix_mdev->pqap_hook = handle_pqap;
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
	hash_init(matrix_mdev->qtable.queues);

	return 0;
}

static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev;
	int ret;

	matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev,
					&vfio_ap_matrix_dev_ops);
	if (IS_ERR(matrix_mdev))
		return PTR_ERR(matrix_mdev);

	ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
	if (ret)
		goto err_put_vdev;
	matrix_mdev->req_trigger = NULL;
	dev_set_drvdata(&mdev->dev, matrix_mdev);
	mutex_lock(&matrix_dev->mdevs_lock);
	list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
	mutex_unlock(&matrix_dev->mdevs_lock);
	return 0;

err_put_vdev:
	vfio_put_device(&matrix_mdev->vdev);
	return ret;
}

static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
				    struct vfio_ap_queue *q)
{
	if (q) {
		q->matrix_mdev = matrix_mdev;
		hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
	}
}

static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
{
	struct vfio_ap_queue *q;

	q = vfio_ap_find_queue(apqn);
	vfio_ap_mdev_link_queue(matrix_mdev, q);
}

static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
{
	hash_del(&q->mdev_qnode);
}

static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
{
	q->matrix_mdev = NULL;
}

static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
{
	struct vfio_ap_queue *q;
	unsigned long apid, apqi;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
		for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
				     AP_DOMAINS) {
			q = vfio_ap_mdev_get_queue(matrix_mdev,
						   AP_MKQID(apid, apqi));
			if (q)
				q->matrix_mdev = NULL;
		}
	}
}

static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&matrix_mdev->vdev);

	mutex_lock(&matrix_dev->guests_lock);
	mutex_lock(&matrix_dev->mdevs_lock);
	vfio_ap_mdev_reset_queues(matrix_mdev);
	vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
	list_del(&matrix_mdev->node);
	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_dev->guests_lock);
	vfio_put_device(&matrix_mdev->vdev);
}

#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
			 "already assigned to %s"

static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
					 unsigned long *apm,
					 unsigned long *aqm)
{
	unsigned long apid, apqi;
	const struct device *dev = mdev_dev(matrix_mdev->mdev);
	const char *mdev_name = dev_name(dev);

	for_each_set_bit_inv(apid, apm, AP_DEVICES)
		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
			dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
}

/**
 * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
 *
 * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
 * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
 *
 * Verifies that each APQN derived from the Cartesian product of a bitmap of
 * AP adapter IDs and AP queue indexes is not configured for any matrix
 * mediated device. AP queue sharing is not allowed.
 *
 * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
 */
static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
					  unsigned long *mdev_aqm)
{
	struct ap_matrix_mdev *matrix_mdev;
	DECLARE_BITMAP(apm, AP_DEVICES);
	DECLARE_BITMAP(aqm, AP_DOMAINS);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		/*
		 * If the input apm and aqm are fields of the matrix_mdev
		 * object, then move on to the next matrix_mdev.
		 */
		if (mdev_apm == matrix_mdev->matrix.apm &&
		    mdev_aqm == matrix_mdev->matrix.aqm)
			continue;

		memset(apm, 0, sizeof(apm));
		memset(aqm, 0, sizeof(aqm));

		/*
		 * We work on full longs, as we can only exclude the leftover
		 * bits in non-inverse order. The leftover is all zeros.
		 */
		if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
				AP_DEVICES))
			continue;

		if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
				AP_DOMAINS))
			continue;

		vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);

		return -EADDRINUSE;
	}

	return 0;
}

/**
 * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are
 *				 not reserved for the default zcrypt driver and
 *				 are not assigned to another mdev.
 *
 * @matrix_mdev: the mdev to which the APQNs being validated are assigned.
 *
 * Return: One of the following values:
 * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function,
 *   most likely -EBUSY indicating the ap_perms_mutex lock is already held.
 * o EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the
 *   zcrypt default driver.
 * o EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another mdev
 * o A zero indicating validation succeeded.
 */
static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
{
	if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm,
					       matrix_mdev->matrix.aqm))
		return -EADDRNOTAVAIL;

	return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
					      matrix_mdev->matrix.aqm);
}

static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
				      unsigned long apid)
{
	unsigned long apqi;

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS)
		vfio_ap_mdev_link_apqn(matrix_mdev,
				       AP_MKQID(apid, apqi));
}

static void collect_queues_to_reset(struct ap_matrix_mdev *matrix_mdev,
				    unsigned long apid,
				    struct list_head *qlist)
{
	struct vfio_ap_queue *q;
	unsigned long apqi;

	for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS) {
		q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
		if (q)
			list_add_tail(&q->reset_qnode, qlist);
	}
}

static void reset_queues_for_apid(struct ap_matrix_mdev *matrix_mdev,
				  unsigned long apid)
{
	struct list_head qlist;

	INIT_LIST_HEAD(&qlist);
	collect_queues_to_reset(matrix_mdev, apid, &qlist);
	vfio_ap_mdev_reset_qlist(&qlist);
}

static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev,
				  unsigned long *apm_reset)
{
	struct list_head qlist;
	unsigned long apid;

	if (bitmap_empty(apm_reset, AP_DEVICES))
		return 0;

	INIT_LIST_HEAD(&qlist);

	for_each_set_bit_inv(apid, apm_reset, AP_DEVICES)
		collect_queues_to_reset(matrix_mdev, apid, &qlist);

	return vfio_ap_mdev_reset_qlist(&qlist);
}

/**
 * assign_adapter_store - parses the APID from @buf and sets the
 * corresponding bit in the mediated matrix device's APM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_adapter attribute
 * @buf:	a buffer containing the AP adapter number (APID) to
 *		be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 * returns one of the following errors:
 *
 *	1. -EINVAL
 *	   The APID is not a valid number
 *
 *	2. -ENODEV
 *	   The APID exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APQIs have yet been assigned, the APID is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is being used by another mediated
 *	   matrix device
 *
 *	5. -EAGAIN
 *	   A lock required to validate the mdev's AP configuration could not
 *	   be obtained.
 */
static ssize_t assign_adapter_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
		ret = count;
		goto done;
	}

	set_bit_inv(apid, matrix_mdev->matrix.apm);

	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (ret) {
		clear_bit_inv(apid, matrix_mdev->matrix.apm);
		goto done;
	}

	vfio_ap_mdev_link_adapter(matrix_mdev, apid);

	if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		reset_queues_for_apids(matrix_mdev, apm_filtered);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);

	return ret;
}
static DEVICE_ATTR_WO(assign_adapter);

static struct vfio_ap_queue
*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev,
			     unsigned long apid, unsigned long apqi)
{
	struct vfio_ap_queue *q = NULL;

	q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
	/* If the queue is assigned to the matrix mdev, unlink it. */
	if (q)
		vfio_ap_unlink_queue_fr_mdev(q);

	return q;
}

/**
 * vfio_ap_mdev_unlink_adapter - unlink all queues associated with an
 *				 unassigned adapter from the matrix mdev to
 *				 which the adapter was assigned.
 * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
 * @apid: the APID of the unassigned adapter.
 * @qlist: list for storing queues associated with the unassigned adapter that
 *	   need to be reset.
 */
static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
					unsigned long apid,
					struct list_head *qlist)
{
	unsigned long apqi;
	struct vfio_ap_queue *q;

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
		q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

		if (q && qlist) {
			if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
			    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
				list_add_tail(&q->reset_qnode, qlist);
		}
	}
}

static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
					    unsigned long apid)
{
	struct vfio_ap_queue *q, *tmpq;
	struct list_head qlist;

	INIT_LIST_HEAD(&qlist);
	vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist);

	if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
		clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	vfio_ap_mdev_reset_qlist(&qlist);

	list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
		vfio_ap_unlink_mdev_fr_queue(q);
		list_del(&q->reset_qnode);
	}
}

/**
 * unassign_adapter_store - parses the APID from @buf and clears the
 * corresponding bit in the mediated matrix device's APM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_adapter attribute
 * @buf:	a buffer containing the adapter number (APID) to be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the APID is not a number
 *	-ENODEV if the APID exceeds the maximum value configured for the
 *		system
 */
static ssize_t unassign_adapter_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
	vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);

static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev,
				     unsigned long apqi)
{
	unsigned long apid;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES)
		vfio_ap_mdev_link_apqn(matrix_mdev,
				       AP_MKQID(apid, apqi));
}

/**
 * assign_domain_store - parses the APQI from @buf and sets the
 * corresponding bit in the mediated matrix device's AQM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_domain attribute
 * @buf:	a buffer containing the AP queue index (APQI) of the domain to
 *		be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise returns
 * one of the following errors:
 *
 *	1. -EINVAL
 *	   The APQI is not a valid number
 *
 *	2. -ENODEV
 *	   The APQI exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APIDs have yet been assigned, the APQI is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is being used by another mediated
 *	   matrix device
 *
 *	5. -EAGAIN
 *	   The lock required to validate the mdev's AP configuration could not
 *	   be obtained.
 */
static ssize_t assign_domain_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
		ret = count;
		goto done;
	}

	set_bit_inv(apqi, matrix_mdev->matrix.aqm);

	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (ret) {
		clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
		goto done;
	}

	vfio_ap_mdev_link_domain(matrix_mdev, apqi);

	if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		reset_queues_for_apids(matrix_mdev, apm_filtered);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);

	return ret;
}
static DEVICE_ATTR_WO(assign_domain);

static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
				       unsigned long apqi,
				       struct list_head *qlist)
{
	unsigned long apid;
	struct vfio_ap_queue *q;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
		q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

		if (q && qlist) {
			if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
			    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
				list_add_tail(&q->reset_qnode, qlist);
		}
	}
}

static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
					   unsigned long apqi)
{
	struct vfio_ap_queue *q, *tmpq;
	struct list_head qlist;

	INIT_LIST_HEAD(&qlist);
	vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist);

	if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
		clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	vfio_ap_mdev_reset_qlist(&qlist);

	list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
		vfio_ap_unlink_mdev_fr_queue(q);
		list_del(&q->reset_qnode);
	}
}

/**
 * unassign_domain_store - parses the APQI from @buf and clears the
 * corresponding bit in the mediated matrix device's AQM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_domain attribute
 * @buf:	a buffer containing the AP queue index (APQI) of the domain to
 *		be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the APQI is not a number
 *	-ENODEV if the APQI exceeds the maximum value configured for the system
 */
static ssize_t unassign_domain_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
	vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
	ret = count;

done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_domain);

/**
 * assign_control_domain_store - parses the domain ID from @buf and sets
 * the corresponding bit in the mediated matrix device's ADM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_control_domain attribute
 * @buf:	a buffer containing the domain ID to be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the ID is not a number
 *	-ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t assign_control_domain_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	int ret;
	unsigned long id;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &id);
	if (ret)
		goto done;

	if (id > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
		ret = count;
		goto done;
	}

	/*
	 * Set the bit in the ADM (bitmask) corresponding to the AP control
	 * domain number (id). The bits in the mask, from most significant to
	 * least significant, correspond to IDs 0 up to one less than the
	 * number of control domains that can be assigned.
	 */
	set_bit_inv(id, matrix_mdev->matrix.adm);
	if (vfio_ap_mdev_filter_cdoms(matrix_mdev))
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);

/**
 * unassign_control_domain_store - parses the domain ID from @buf and
 * clears the corresponding bit in the mediated matrix device's ADM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_control_domain attribute
 * @buf:	a buffer containing the domain ID to be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the ID is not a number
 *	-ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t unassign_control_domain_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	int ret;
	unsigned long domid;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &domid);
	if (ret)
		goto done;

	if (domid > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv(domid, matrix_mdev->matrix.adm);

	if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
		clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);

static ssize_t control_domains_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	unsigned long id;
	int nchars = 0;
	int n;
	char *bufpos = buf;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	unsigned long max_domid = matrix_mdev->matrix.adm_max;

	mutex_lock(&matrix_dev->mdevs_lock);
	for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
		n = sprintf(bufpos, "%04lx\n", id);
		bufpos += n;
		nchars += n;
	}
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(control_domains);

static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
	char *bufpos = buf;
	unsigned long apid;
	unsigned long apqi;
	unsigned long apid1;
	unsigned long apqi1;
	unsigned long napm_bits = matrix->apm_max + 1;
	unsigned long naqm_bits = matrix->aqm_max + 1;
	int nchars = 0;
	int n;

	apid1 = find_first_bit_inv(matrix->apm, napm_bits);
	apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);

	if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
			for_each_set_bit_inv(apqi, matrix->aqm,
					     naqm_bits) {
				n = sprintf(bufpos, "%02lx.%04lx\n", apid,
					    apqi);
				bufpos += n;
				nchars += n;
			}
		}
	} else if (apid1 < napm_bits) {
		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
			n = sprintf(bufpos, "%02lx.\n", apid);
			bufpos += n;
			nchars += n;
		}
	} else if (apqi1 < naqm_bits) {
		for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) {
			n = sprintf(bufpos, ".%04lx\n", apqi);
			bufpos += n;
			nchars += n;
		}
	}

	return nchars;
}
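
/*
 * Example output of the show function above (illustrative values): with
 * adapters 05 and 06 and domains 0004 and 00ab assigned, reading the
 * matrix attribute yields:
 *
 *	05.0004
 *	05.00ab
 *	06.0004
 *	06.00ab
 *
 * If only adapters are assigned, lines look like "05."; if only domains
 * are assigned, like ".0004".
 */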

static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	ssize_t nchars;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(matrix);

static ssize_t guest_matrix_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t nchars;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(guest_matrix);

static struct attribute *vfio_ap_mdev_attrs[] = {
	&dev_attr_assign_adapter.attr,
	&dev_attr_unassign_adapter.attr,
	&dev_attr_assign_domain.attr,
	&dev_attr_unassign_domain.attr,
	&dev_attr_assign_control_domain.attr,
	&dev_attr_unassign_control_domain.attr,
	&dev_attr_control_domains.attr,
	&dev_attr_matrix.attr,
	&dev_attr_guest_matrix.attr,
	NULL,
};

static struct attribute_group vfio_ap_mdev_attr_group = {
	.attrs = vfio_ap_mdev_attrs
};

static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
	&vfio_ap_mdev_attr_group,
	NULL
};
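
/*
 * Illustrative sysfs usage of the attributes above (a sketch in the style of
 * the vfio-ap documentation; the mdev UUID and numbers are examples):
 *
 *	uuid=62177883-f1bb-47f0-914d-32a22e3a8804
 *	echo 0x05 > /sys/devices/vfio_ap/matrix/$uuid/assign_adapter
 *	echo 0x47 > /sys/devices/vfio_ap/matrix/$uuid/assign_domain
 *	echo 0x47 > /sys/devices/vfio_ap/matrix/$uuid/assign_control_domain
 *	cat /sys/devices/vfio_ap/matrix/$uuid/matrix
 */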

/**
 * vfio_ap_mdev_set_kvm - sets all data for @matrix_mdev that are needed
 *			  to manage AP resources for the guest whose state is
 *			  represented by @kvm
 *
 * @matrix_mdev: a mediated matrix device
 * @kvm: reference to KVM instance
 *
 * Return: 0 if no other mediated matrix device has a reference to @kvm;
 * otherwise, returns -EPERM.
 */
static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
				struct kvm *kvm)
{
	struct ap_matrix_mdev *m;

	if (kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		get_update_locks_for_kvm(kvm);

		list_for_each_entry(m, &matrix_dev->mdev_list, node) {
			if (m != matrix_mdev && m->kvm == kvm) {
				release_update_locks_for_kvm(kvm);
				return -EPERM;
			}
		}

		kvm_get_kvm(kvm);
		matrix_mdev->kvm = kvm;
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

		release_update_locks_for_kvm(kvm);
	}

	return 0;
}

static void unmap_iova(struct ap_matrix_mdev *matrix_mdev, u64 iova, u64 length)
{
	struct ap_queue_table *qtable = &matrix_mdev->qtable;
	struct vfio_ap_queue *q;
	int loop_cursor;

	hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
		if (q->saved_iova >= iova && q->saved_iova < iova + length)
			vfio_ap_irq_disable(q);
	}
}

static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova,
				   u64 length)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	mutex_lock(&matrix_dev->mdevs_lock);

	unmap_iova(matrix_mdev, iova, length);

	mutex_unlock(&matrix_dev->mdevs_lock);
}

/**
 * vfio_ap_mdev_unset_kvm - performs clean-up of resources no longer needed
 *			    by @matrix_mdev.
 *
 * @matrix_mdev: a matrix mediated device
 */
static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
{
	struct kvm *kvm = matrix_mdev->kvm;

	if (kvm && kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = NULL;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		get_update_locks_for_kvm(kvm);

		kvm_arch_crypto_clear_masks(kvm);
		vfio_ap_mdev_reset_queues(matrix_mdev);
		kvm_put_kvm(kvm);
		matrix_mdev->kvm = NULL;

		release_update_locks_for_kvm(kvm);
	}
}

static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
	struct ap_queue *queue;
	struct vfio_ap_queue *q = NULL;

	queue = ap_get_qdev(apqn);
	if (!queue)
		return NULL;

	if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver)
		q = dev_get_drvdata(&queue->ap_dev.device);

	put_device(&queue->ap_dev.device);

	return q;
}

static int apq_status_check(int apqn, struct ap_queue_status *status)
{
	switch (status->response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_DECONFIGURED:
		return 0;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
		return -EBUSY;
	case AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE:
	case AP_RESPONSE_ASSOC_FAILED:
		/*
		 * These asynchronous response codes indicate a PQAP(AAPQ)
		 * instruction to associate a secret with the guest failed. All
		 * subsequent AP instructions will end with the asynchronous
		 * response code until the AP queue is reset; so, let's return
		 * a value indicating a reset needs to be performed again.
		 */
		return -EAGAIN;
	default:
		WARN(true,
		     "failed to verify reset of queue %02x.%04x: TAPQ rc=%u\n",
		     AP_QID_CARD(apqn), AP_QID_QUEUE(apqn),
		     status->response_code);
		return -EIO;
	}
}

#define WAIT_MSG "Waited %dms for reset of queue %02x.%04x (%u, %u, %u)"

static void apq_reset_check(struct work_struct *reset_work)
{
	int ret = -EBUSY, elapsed = 0;
	struct ap_queue_status status;
	struct vfio_ap_queue *q;

	q = container_of(reset_work, struct vfio_ap_queue, reset_work);
	memcpy(&status, &q->reset_status, sizeof(status));
	while (true) {
		msleep(AP_RESET_INTERVAL);
		elapsed += AP_RESET_INTERVAL;
		status = ap_tapq(q->apqn, NULL);
		ret = apq_status_check(q->apqn, &status);
		if (ret == -EIO)
			return;
		if (ret == -EBUSY) {
			pr_notice_ratelimited(WAIT_MSG, elapsed,
					      AP_QID_CARD(q->apqn),
					      AP_QID_QUEUE(q->apqn),
					      status.response_code,
					      status.queue_empty,
					      status.irq_enabled);
		} else {
			if (q->reset_status.response_code == AP_RESPONSE_RESET_IN_PROGRESS ||
			    q->reset_status.response_code == AP_RESPONSE_BUSY ||
			    q->reset_status.response_code == AP_RESPONSE_STATE_CHANGE_IN_PROGRESS ||
			    ret == -EAGAIN) {
				status = ap_zapq(q->apqn, 0);
				memcpy(&q->reset_status, &status, sizeof(status));
				continue;
			}
			/*
			 * When an AP adapter is deconfigured, the
			 * associated queues are reset, so let's set the
			 * status response code to 0 so the queue may be
			 * passed through (i.e., not filtered)
			 */
			if (status.response_code == AP_RESPONSE_DECONFIGURED)
				q->reset_status.response_code = 0;
			if (q->saved_isc != VFIO_AP_ISC_INVALID)
				vfio_ap_free_aqic_resources(q);
			break;
		}
	}
}

static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
{
	struct ap_queue_status status;

	if (!q)
		return;
	status = ap_zapq(q->apqn, 0);
	memcpy(&q->reset_status, &status, sizeof(status));
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		/*
		 * Let's verify whether the ZAPQ completed successfully on a
		 * work queue.
		 */
		queue_work(system_long_wq, &q->reset_work);
		break;
	case AP_RESPONSE_DECONFIGURED:
		/*
		 * When an AP adapter is deconfigured, the associated
		 * queues are reset, so let's set the status response code to 0
		 * so the queue may be passed through (i.e., not filtered).
		 */
		q->reset_status.response_code = 0;
		vfio_ap_free_aqic_resources(q);
		break;
	default:
		WARN(true,
		     "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
		     AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
		     status.response_code);
	}
}
static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
{
	int ret = 0, loop_cursor;
	struct vfio_ap_queue *q;

	hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode)
		vfio_ap_mdev_reset_queue(q);

	hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) {
		flush_work(&q->reset_work);

		if (q->reset_status.response_code)
			ret = -EIO;
	}

	return ret;
}

static int vfio_ap_mdev_reset_qlist(struct list_head *qlist)
{
	int ret = 0;
	struct vfio_ap_queue *q;

	list_for_each_entry(q, qlist, reset_qnode)
		vfio_ap_mdev_reset_queue(q);

	list_for_each_entry(q, qlist, reset_qnode) {
		flush_work(&q->reset_work);

		if (q->reset_status.response_code)
			ret = -EIO;
	}

	return ret;
}

static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	if (!vdev->kvm)
		return -EINVAL;

	return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
}

static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	vfio_ap_mdev_unset_kvm(matrix_mdev);
}

static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
{
	struct device *dev = vdev->dev;
	struct ap_matrix_mdev *matrix_mdev;

	matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev);

	if (matrix_mdev->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(dev,
					       "Relaying device request to user (#%u)\n",
					       count);

		eventfd_signal(matrix_mdev->req_trigger);
	} else if (count == 0) {
		dev_notice(dev,
			   "No device request registered, blocked until released by user\n");
	}
}

static int vfio_ap_mdev_get_device_info(unsigned long arg)
{
	unsigned long minsz;
	struct vfio_device_info info;

	minsz = offsetofend(struct vfio_device_info, num_irqs);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
	info.num_regions = 0;
	info.num_irqs = VFIO_AP_NUM_IRQS;

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}

static ssize_t vfio_ap_get_irq_info(unsigned long arg)
{
	unsigned long minsz;
	struct vfio_irq_info info;

	minsz = offsetofend(struct vfio_irq_info, count);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz || info.index >= VFIO_AP_NUM_IRQS)
		return -EINVAL;

	switch (info.index) {
	case VFIO_AP_REQ_IRQ_INDEX:
		info.count = 1;
		info.flags = VFIO_IRQ_INFO_EVENTFD;
		break;
	default:
		return -EINVAL;
	}

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}

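/**
 * vfio_ap_irq_set_init - copy the header of a VFIO_DEVICE_SET_IRQS ioctl
 *			  argument from userspace and validate it.
 *
 * @irq_set: the struct vfio_irq_set to be filled in.
 * @arg: the ioctl argument pointing to the userspace buffer.
 *
 * Return: 0 if the header is valid and specifies a trigger action; otherwise,
 *	   -EFAULT, -EINVAL or the value returned by
 *	   vfio_set_irqs_validate_and_prepare().
 */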
static int vfio_ap_irq_set_init(struct vfio_irq_set *irq_set, unsigned long arg)
{
	int ret;
	size_t data_size;
	unsigned long minsz;

	minsz = offsetofend(struct vfio_irq_set, count);

	if (copy_from_user(irq_set, (void __user *)arg, minsz))
		return -EFAULT;

	ret = vfio_set_irqs_validate_and_prepare(irq_set, 1, VFIO_AP_NUM_IRQS,
						 &data_size);
	if (ret)
		return ret;

	if (!(irq_set->flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	return 0;
}

static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev,
				   unsigned long arg)
{
	s32 fd;
	void __user *data;
	unsigned long minsz;
	struct eventfd_ctx *req_trigger;

	minsz = offsetofend(struct vfio_irq_set, count);
	data = (void __user *)(arg + minsz);

	if (get_user(fd, (s32 __user *)data))
		return -EFAULT;

	if (fd == -1) {
		if (matrix_mdev->req_trigger)
			eventfd_ctx_put(matrix_mdev->req_trigger);
		matrix_mdev->req_trigger = NULL;
	} else if (fd >= 0) {
		req_trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(req_trigger))
			return PTR_ERR(req_trigger);

		if (matrix_mdev->req_trigger)
			eventfd_ctx_put(matrix_mdev->req_trigger);

		matrix_mdev->req_trigger = req_trigger;
	} else {
		return -EINVAL;
	}

	return 0;
}

static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev,
			    unsigned long arg)
{
	int ret;
	struct vfio_irq_set irq_set;

	ret = vfio_ap_irq_set_init(&irq_set, arg);
	if (ret)
		return ret;

	switch (irq_set.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_EVENTFD:
		switch (irq_set.index) {
		case VFIO_AP_REQ_IRQ_INDEX:
			return vfio_ap_set_request_irq(matrix_mdev, arg);
		default:
			return -EINVAL;
		}
	default:
		return -EINVAL;
	}
}

static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
				  unsigned int cmd, unsigned long arg)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);
	int ret;

	mutex_lock(&matrix_dev->mdevs_lock);
	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		ret = vfio_ap_mdev_get_device_info(arg);
		break;
	case VFIO_DEVICE_RESET:
		ret = vfio_ap_mdev_reset_queues(matrix_mdev);
		break;
	case VFIO_DEVICE_GET_IRQ_INFO:
		ret = vfio_ap_get_irq_info(arg);
		break;
	case VFIO_DEVICE_SET_IRQS:
		ret = vfio_ap_set_irqs(matrix_mdev, arg);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&matrix_dev->mdevs_lock);

	return ret;
}

static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
{
	struct ap_matrix_mdev *matrix_mdev;
	unsigned long apid = AP_QID_CARD(q->apqn);
	unsigned long apqi = AP_QID_QUEUE(q->apqn);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
		    test_bit_inv(apqi, matrix_mdev->matrix.aqm))
			return matrix_mdev;
	}

	return NULL;
}

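/**
 * status_show - sysfs attribute indicating whether a queue device is
 *		 unassigned, assigned to a matrix mdev, or in use by a guest.
 *
 * @dev: the queue device.
 * @attr: the status attribute.
 * @buf: the buffer in which to store the attribute value.
 *
 * Return: the number of characters written to @buf.
 */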
static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	ssize_t nchars = 0;
	struct vfio_ap_queue *q;
	unsigned long apid, apqi;
	struct ap_matrix_mdev *matrix_mdev;
	struct ap_device *apdev = to_ap_dev(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	q = dev_get_drvdata(&apdev->device);
	matrix_mdev = vfio_ap_mdev_for_queue(q);

	/*
	 * If the queue is assigned to the matrix mediated device, then
	 * determine whether it is passed through to a guest; otherwise,
	 * indicate that it is unassigned.
	 */
	if (matrix_mdev) {
		apid = AP_QID_CARD(q->apqn);
		apqi = AP_QID_QUEUE(q->apqn);
		/*
		 * If the queue is passed through to the guest, then indicate
		 * that it is in use; otherwise, indicate that it is
		 * merely assigned to a matrix mediated device.
		 */
		if (matrix_mdev->kvm &&
		    test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
		    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
			nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
					   AP_QUEUE_IN_USE);
		else
			nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
					   AP_QUEUE_ASSIGNED);
	} else {
		nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
				   AP_QUEUE_UNASSIGNED);
	}

	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}

static DEVICE_ATTR_RO(status);

static struct attribute *vfio_queue_attrs[] = {
	&dev_attr_status.attr,
	NULL,
};

static const struct attribute_group vfio_queue_attr_group = {
	.attrs = vfio_queue_attrs,
};

static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
	.init = vfio_ap_mdev_init_dev,
	.open_device = vfio_ap_mdev_open_device,
	.close_device = vfio_ap_mdev_close_device,
	.ioctl = vfio_ap_mdev_ioctl,
	.dma_unmap = vfio_ap_mdev_dma_unmap,
	.bind_iommufd = vfio_iommufd_emulated_bind,
	.unbind_iommufd = vfio_iommufd_emulated_unbind,
	.attach_ioas = vfio_iommufd_emulated_attach_ioas,
	.detach_ioas = vfio_iommufd_emulated_detach_ioas,
	.request = vfio_ap_mdev_request
};

static struct mdev_driver vfio_ap_matrix_driver = {
	.device_api = VFIO_DEVICE_API_AP_STRING,
	.max_instances = MAX_ZDEV_ENTRIES_EXT,
	.driver = {
		.name = "vfio_ap_mdev",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = vfio_ap_mdev_attr_groups,
	},
	.probe = vfio_ap_mdev_probe,
	.remove = vfio_ap_mdev_remove,
};

int vfio_ap_mdev_register(void)
{
	int ret;

	ret = mdev_register_driver(&vfio_ap_matrix_driver);
	if (ret)
		return ret;

	matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT;
	matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT;
	matrix_dev->mdev_types[0] = &matrix_dev->mdev_type;
	ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device,
				   &vfio_ap_matrix_driver,
				   matrix_dev->mdev_types, 1);
	if (ret)
		goto err_driver;
	return 0;

err_driver:
	mdev_unregister_driver(&vfio_ap_matrix_driver);
	return ret;
}

void vfio_ap_mdev_unregister(void)
{
	mdev_unregister_parent(&matrix_dev->parent);
	mdev_unregister_driver(&vfio_ap_matrix_driver);
}

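/**
 * vfio_ap_mdev_probe_queue - probe callback invoked when a queue device is
 *			      bound to the vfio_ap device driver.
 *
 * @apdev: the AP queue device.
 *
 * Creates the queue's sysfs status attribute and, if the queue's APQN is
 * assigned to a matrix mdev, links the queue to it and hot plugs the queue
 * into the guest if it passes filtering (unless a bus scan is in progress,
 * in which case the on_scan_complete callback handles the hot plug).
 *
 * Return: 0 on success; otherwise, an error code.
 */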
int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
{
	int ret;
	struct vfio_ap_queue *q;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev;

	ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
	if (ret)
		return ret;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q) {
		ret = -ENOMEM;
		goto err_remove_group;
	}

	q->apqn = to_ap_queue(&apdev->device)->qid;
	q->saved_isc = VFIO_AP_ISC_INVALID;
	memset(&q->reset_status, 0, sizeof(q->reset_status));
	INIT_WORK(&q->reset_work, apq_reset_check);
	matrix_mdev = get_update_locks_by_apqn(q->apqn);

	if (matrix_mdev) {
		vfio_ap_mdev_link_queue(matrix_mdev, q);

		/*
		 * If we're in the process of handling the addition of adapters
		 * or domains to the host's AP configuration, then let the
		 * vfio_ap device driver's on_scan_complete callback filter the
		 * matrix and update the guest's AP configuration after all of
		 * the new queue devices are probed.
		 */
		if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) ||
		    !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS))
			goto done;

		if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
			vfio_ap_mdev_update_guest_apcb(matrix_mdev);
			reset_queues_for_apids(matrix_mdev, apm_filtered);
		}
	}

done:
	dev_set_drvdata(&apdev->device, q);
	release_update_locks_for_mdev(matrix_mdev);

	return ret;

err_remove_group:
	sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
	return ret;
}

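/**
 * vfio_ap_mdev_remove_queue - remove callback invoked when a queue device is
 *			       unbound from the vfio_ap device driver.
 *
 * @apdev: the AP queue device.
 *
 * If the queue is passed through to a guest, the adapter to which it belongs
 * is hot unplugged from the guest (a single queue cannot be unplugged);
 * otherwise, the queue is reset if it is still in the host's AP
 * configuration. The queue is then unlinked from its matrix mdev, if any,
 * and freed.
 */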
void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
{
	unsigned long apid, apqi;
	struct vfio_ap_queue *q;
	struct ap_matrix_mdev *matrix_mdev;

	sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
	q = dev_get_drvdata(&apdev->device);
	get_update_locks_for_queue(q);
	matrix_mdev = q->matrix_mdev;
	apid = AP_QID_CARD(q->apqn);
	apqi = AP_QID_QUEUE(q->apqn);

	if (matrix_mdev) {
		/* If the queue is assigned to the guest's AP configuration */
		if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
		    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
			/*
			 * Since the queues are defined via a matrix of adapters
			 * and domains, it is not possible to hot unplug a
			 * single queue; so, let's unplug the adapter.
			 */
			clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
			vfio_ap_mdev_update_guest_apcb(matrix_mdev);
			reset_queues_for_apid(matrix_mdev, apid);
			goto done;
		}
	}

	/*
	 * If the queue is not in the host's AP configuration, then resetting
	 * it will fail with response code 01 (APQN not valid); so, let's make
	 * sure it is in the host's config.
	 */
	if (test_bit_inv(apid, (unsigned long *)matrix_dev->info.apm) &&
	    test_bit_inv(apqi, (unsigned long *)matrix_dev->info.aqm)) {
		vfio_ap_mdev_reset_queue(q);
		flush_work(&q->reset_work);
	}

done:
	if (matrix_mdev)
		vfio_ap_unlink_queue_fr_mdev(q);

	dev_set_drvdata(&apdev->device, NULL);
	kfree(q);
	release_update_locks_for_mdev(matrix_mdev);
}

/**
 * vfio_ap_mdev_resource_in_use: check whether any of a set of APQNs is
 *				 assigned to a mediated device under the
 *				 control of the vfio_ap device driver.
 *
 * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check.
 * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check.
 *
 * Return:
 *	* -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are
 *	  assigned to a mediated device under the control of the vfio_ap
 *	  device driver.
 *	* Otherwise, return 0.
 */
int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
{
	int ret;

	mutex_lock(&matrix_dev->guests_lock);
	mutex_lock(&matrix_dev->mdevs_lock);
	ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_dev->guests_lock);

	return ret;
}

/**
 * vfio_ap_mdev_hot_unplug_cfg - hot unplug the adapters, domains and control
 *				 domains that have been removed from the host's
 *				 AP configuration from a guest.
 *
 * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
 * @aprem: the adapters that have been removed from the host's AP configuration
 * @aqrem: the domains that have been removed from the host's AP configuration
 * @cdrem: the control domains that have been removed from the host's AP
 *	   configuration.
 */
static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev,
					unsigned long *aprem,
					unsigned long *aqrem,
					unsigned long *cdrem)
{
	int do_hotplug = 0;

	if (!bitmap_empty(aprem, AP_DEVICES)) {
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm,
					    matrix_mdev->shadow_apcb.apm,
					    aprem, AP_DEVICES);
	}

	if (!bitmap_empty(aqrem, AP_DOMAINS)) {
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm,
					    matrix_mdev->shadow_apcb.aqm,
					    aqrem, AP_DOMAINS);
	}

	if (!bitmap_empty(cdrem, AP_DOMAINS))
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm,
					    matrix_mdev->shadow_apcb.adm,
					    cdrem, AP_DOMAINS);

	if (do_hotplug)
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
}

/**
 * vfio_ap_mdev_cfg_remove - determines which guests are using the adapters,
 *			     domains and control domains that have been removed
 *			     from the host AP configuration and unplugs them
 *			     from those guests.
 *
 * @ap_remove: bitmap specifying which adapters have been removed from the host
 *	       config.
 * @aq_remove: bitmap specifying which domains have been removed from the host
 *	       config.
 * @cd_remove: bitmap specifying which control domains have been removed from
 *	       the host config.
 */
static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove,
				    unsigned long *aq_remove,
				    unsigned long *cd_remove)
{
	struct ap_matrix_mdev *matrix_mdev;
	DECLARE_BITMAP(aprem, AP_DEVICES);
	DECLARE_BITMAP(aqrem, AP_DOMAINS);
	DECLARE_BITMAP(cdrem, AP_DOMAINS);
	int do_remove = 0;

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		mutex_lock(&matrix_mdev->kvm->lock);
		mutex_lock(&matrix_dev->mdevs_lock);

		do_remove |= bitmap_and(aprem, ap_remove,
					matrix_mdev->matrix.apm,
					AP_DEVICES);
		do_remove |= bitmap_and(aqrem, aq_remove,
					matrix_mdev->matrix.aqm,
					AP_DOMAINS);
		do_remove |= bitmap_andnot(cdrem, cd_remove,
					   matrix_mdev->matrix.adm,
					   AP_DOMAINS);

		if (do_remove)
			vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem,
						    cdrem);

		mutex_unlock(&matrix_dev->mdevs_lock);
		mutex_unlock(&matrix_mdev->kvm->lock);
	}
}

/**
 * vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains
 *				and control domains from the host AP
 *				configuration by unplugging them from the
 *				guests that are using them.
 * @cur_config_info: the current host AP configuration information
 * @prev_config_info: the previous host AP configuration information
 */
static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info,
				       struct ap_config_info *prev_config_info)
{
	int do_remove;
	DECLARE_BITMAP(aprem, AP_DEVICES);
	DECLARE_BITMAP(aqrem, AP_DOMAINS);
	DECLARE_BITMAP(cdrem, AP_DOMAINS);

	do_remove = bitmap_andnot(aprem,
				  (unsigned long *)prev_config_info->apm,
				  (unsigned long *)cur_config_info->apm,
				  AP_DEVICES);
	do_remove |= bitmap_andnot(aqrem,
				   (unsigned long *)prev_config_info->aqm,
				   (unsigned long *)cur_config_info->aqm,
				   AP_DOMAINS);
	do_remove |= bitmap_andnot(cdrem,
				   (unsigned long *)prev_config_info->adm,
				   (unsigned long *)cur_config_info->adm,
				   AP_DOMAINS);

	if (do_remove)
		vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem);
}

/**
 * vfio_ap_filter_apid_by_qtype: filter APIDs from an AP mask for adapters that
 *				 are older than AP type 10 (CEX4).
 * @apm: a bitmap of the APIDs to examine
 * @aqm: a bitmap of the APQIs of the queues to query for the AP type.
 */
static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
{
	bool apid_cleared;
	struct ap_queue_status status;
	unsigned long apid, apqi;
	struct ap_tapq_hwinfo info;

	for_each_set_bit_inv(apid, apm, AP_DEVICES) {
		apid_cleared = false;

		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
			status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info);
			switch (status.response_code) {
			/*
			 * According to the architecture, in each case
			 * below the queue's info should be filled.
			 */
			case AP_RESPONSE_NORMAL:
			case AP_RESPONSE_RESET_IN_PROGRESS:
			case AP_RESPONSE_DECONFIGURED:
			case AP_RESPONSE_CHECKSTOPPED:
			case AP_RESPONSE_BUSY:
				/*
				 * The vfio_ap device driver only
				 * supports CEX4 and newer adapters, so
				 * remove the APID if the adapter is
				 * older than a CEX4.
				 */
				if (info.at < AP_DEVICE_TYPE_CEX4) {
					clear_bit_inv(apid, apm);
					apid_cleared = true;
				}

				break;

			default:
				/*
				 * If we don't know the adapter type,
				 * clear its APID since it can't be
				 * determined whether the vfio_ap
				 * device driver supports it.
				 */
				clear_bit_inv(apid, apm);
				apid_cleared = true;
				break;
			}

			/*
			 * If we've already cleared the APID from the apm, there
			 * is no need to continue examining the remaining AP
			 * queues to determine the type of the adapter.
			 */
			if (apid_cleared)
				break;
		}
	}
}

/**
 * vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and
 *			  control domains that have been added to the host's
 *			  AP configuration for each matrix mdev to which they
 *			  are assigned.
 *
 * @apm_add: a bitmap specifying the adapters that have been added to the AP
 *	     configuration.
 * @aqm_add: a bitmap specifying the domains that have been added to the AP
 *	     configuration.
 * @adm_add: a bitmap specifying the control domains that have been added to the
 *	     AP configuration.
 */
static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add,
				 unsigned long *adm_add)
{
	struct ap_matrix_mdev *matrix_mdev;

	if (list_empty(&matrix_dev->mdev_list))
		return;

	vfio_ap_filter_apid_by_qtype(apm_add, aqm_add);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		bitmap_and(matrix_mdev->apm_add,
			   matrix_mdev->matrix.apm, apm_add, AP_DEVICES);
		bitmap_and(matrix_mdev->aqm_add,
			   matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS);
		bitmap_and(matrix_mdev->adm_add,
			   matrix_mdev->matrix.adm, adm_add, AP_DOMAINS);
	}
}

/**
 * vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and
 *			     control domains to the host AP configuration
 *			     by updating the bitmaps that specify what adapters,
 *			     domains and control domains have been added so they
 *			     can be hot plugged into the guest when the AP bus
 *			     scan completes (see the vfio_ap_on_scan_complete
 *			     function).
 * @cur_config_info: the current AP configuration information
 * @prev_config_info: the previous AP configuration information
 */
static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info,
				    struct ap_config_info *prev_config_info)
{
	bool do_add;
	DECLARE_BITMAP(apm_add, AP_DEVICES);
	DECLARE_BITMAP(aqm_add, AP_DOMAINS);
	DECLARE_BITMAP(adm_add, AP_DOMAINS);

	do_add = bitmap_andnot(apm_add,
			       (unsigned long *)cur_config_info->apm,
			       (unsigned long *)prev_config_info->apm,
			       AP_DEVICES);
	do_add |= bitmap_andnot(aqm_add,
				(unsigned long *)cur_config_info->aqm,
				(unsigned long *)prev_config_info->aqm,
				AP_DOMAINS);
	do_add |= bitmap_andnot(adm_add,
				(unsigned long *)cur_config_info->adm,
				(unsigned long *)prev_config_info->adm,
				AP_DOMAINS);

	if (do_add)
		vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add);
}

/**
 * vfio_ap_on_cfg_changed - handles notification of changes to the host AP
 *			    configuration.
 *
 * @cur_cfg_info: the current host AP configuration
 * @prev_cfg_info: the previous host AP configuration
 */
void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
			    struct ap_config_info *prev_cfg_info)
{
	if (!cur_cfg_info || !prev_cfg_info)
		return;

	mutex_lock(&matrix_dev->guests_lock);

	vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info);
	vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info);
	memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info));

	mutex_unlock(&matrix_dev->guests_lock);
}

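/**
 * vfio_ap_mdev_hot_plug_cfg - hot plug the adapters, domains and control
 *			       domains added to the host's AP configuration
 *			       into the guest to which a matrix mdev is
 *			       attached.
 *
 * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
 */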
2538 * 2539 * @cur_cfg_info: the current host AP configuration 2540 * @prev_cfg_info: the previous host AP configuration 2541 */ 2542 void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info, 2543 struct ap_config_info *prev_cfg_info) 2544 { 2545 if (!cur_cfg_info || !prev_cfg_info) 2546 return; 2547 2548 mutex_lock(&matrix_dev->guests_lock); 2549 2550 vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info); 2551 vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info); 2552 memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info)); 2553 2554 mutex_unlock(&matrix_dev->guests_lock); 2555 } 2556 2557 static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev) 2558 { 2559 DECLARE_BITMAP(apm_filtered, AP_DEVICES); 2560 bool filter_domains, filter_adapters, filter_cdoms, do_hotplug = false; 2561 2562 mutex_lock(&matrix_mdev->kvm->lock); 2563 mutex_lock(&matrix_dev->mdevs_lock); 2564 2565 filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm, 2566 matrix_mdev->apm_add, AP_DEVICES); 2567 filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm, 2568 matrix_mdev->aqm_add, AP_DOMAINS); 2569 filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm, 2570 matrix_mdev->adm_add, AP_DOMAINS); 2571 2572 if (filter_adapters || filter_domains) 2573 do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered); 2574 2575 if (filter_cdoms) 2576 do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev); 2577 2578 if (do_hotplug) 2579 vfio_ap_mdev_update_guest_apcb(matrix_mdev); 2580 2581 reset_queues_for_apids(matrix_mdev, apm_filtered); 2582 2583 mutex_unlock(&matrix_dev->mdevs_lock); 2584 mutex_unlock(&matrix_mdev->kvm->lock); 2585 } 2586 2587 void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info, 2588 struct ap_config_info *old_config_info) 2589 { 2590 struct ap_matrix_mdev *matrix_mdev; 2591 2592 mutex_lock(&matrix_dev->guests_lock); 2593 2594 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { 2595 if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) && 2596 bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) && 2597 bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS)) 2598 continue; 2599 2600 vfio_ap_mdev_hot_plug_cfg(matrix_mdev); 2601 bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES); 2602 bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS); 2603 bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS); 2604 } 2605 2606 mutex_unlock(&matrix_dev->guests_lock); 2607 } 2608