// SPDX-License-Identifier: GPL-2.0+
/*
 * Adjunct processor matrix VFIO device driver callbacks.
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
 *	      Halil Pasic <pasic@linux.ibm.com>
 *	      Pierre Morel <pmorel@linux.ibm.com>
 */
#include <linux/string.h>
#include <linux/vfio.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/uuid.h>
#include <asm/kvm.h>
#include <asm/zcrypt.h>

#include "vfio_ap_private.h"
#include "vfio_ap_debug.h"

#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"

#define AP_QUEUE_ASSIGNED "assigned"
#define AP_QUEUE_UNASSIGNED "unassigned"
#define AP_QUEUE_IN_USE "in use"

#define AP_RESET_INTERVAL	20	/* Reset sleep interval (20ms) */

static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
static int vfio_ap_mdev_reset_qlist(struct list_head *qlist);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);

/**
 * get_update_locks_for_kvm: Acquire the locks required to dynamically update a
 *			     KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. kvm->lock:	       required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @kvm is NULL, the KVM lock will not be taken.
 */
static inline void get_update_locks_for_kvm(struct kvm *kvm)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (kvm)
		mutex_lock(&kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * release_update_locks_for_kvm: Release the locks used to dynamically update a
 *				 KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @kvm is NULL, the KVM lock will not be released.
 */
static inline void release_update_locks_for_kvm(struct kvm *kvm)
{
	mutex_unlock(&matrix_dev->mdevs_lock);
	if (kvm)
		mutex_unlock(&kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);
}

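/*
 * Illustrative sketch (editorial note, not part of the driver): every
 * dynamic update of a guest's APCB is expected to be bracketed by one of
 * the lock-helper pairs defined in this file, e.g.:
 *
 *	get_update_locks_for_kvm(kvm);
 *	... update the matrix mdev's state and the guest's APCB ...
 *	release_update_locks_for_kvm(kvm);
 *
 * Taking guests_lock -> kvm->lock -> mdevs_lock in the same order
 * everywhere is what prevents ABBA deadlocks between the sysfs assignment
 * paths and the open/close/PQAP paths.
 */
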
/**
 * get_update_locks_for_mdev: Acquire the locks required to dynamically update a
 *			      KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *		 configuration data to use to update a KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *	 lock will not be taken.
 */
static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (matrix_mdev && matrix_mdev->kvm)
		mutex_lock(&matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * release_update_locks_for_mdev: Release the locks used to dynamically update a
 *				  KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *		 configuration data to use to update a KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. matrix_mdev->kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *	 lock will not be released.
 */
static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	mutex_unlock(&matrix_dev->mdevs_lock);
	if (matrix_mdev && matrix_mdev->kvm)
		mutex_unlock(&matrix_mdev->kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);
}

/**
 * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and
 *			     acquire the locks required to update the APCB of
 *			     the KVM guest to which the mdev is attached.
 *
 * @apqn: the APQN of a queue device.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock
 *	 will not be taken.
 *
 * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn
 *	   is not assigned to an ap_matrix_mdev.
 */
static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn)
{
	struct ap_matrix_mdev *matrix_mdev;

	mutex_lock(&matrix_dev->guests_lock);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) &&
		    test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) {
			if (matrix_mdev->kvm)
				mutex_lock(&matrix_mdev->kvm->lock);

			mutex_lock(&matrix_dev->mdevs_lock);

			return matrix_mdev;
		}
	}

	mutex_lock(&matrix_dev->mdevs_lock);

	return NULL;
}
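
/*
 * Illustrative pairing (editorial note): a caller that looks up the mdev by
 * APQN releases the same locks with the mdev-based helper, e.g.:
 *
 *	matrix_mdev = get_update_locks_by_apqn(apqn);
 *	... update the guest's APCB if matrix_mdev is not NULL ...
 *	release_update_locks_for_mdev(matrix_mdev);
 *
 * release_update_locks_for_mdev() tolerates a NULL @matrix_mdev, so the
 * pairing works whether or not the APQN was found.
 */
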
/**
 * get_update_locks_for_queue: get the locks required to update the APCB of the
 *			       KVM guest to which the matrix mdev linked to a
 *			       vfio_ap_queue object is attached.
 *
 * @q: a pointer to a vfio_ap_queue object.
 *
 * The proper locking order is:
 * 1. q->matrix_dev->guests_lock: required to use the KVM pointer to update a
 *				  KVM guest's APCB.
 * 2. q->matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:	  required to access data stored in matrix_mdev
 *
 * Note: if @q is not linked to an ap_matrix_mdev object, the KVM lock will
 *	 not be taken.
 */
static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (q->matrix_mdev && q->matrix_mdev->kvm)
		mutex_lock(&q->matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
 *			    hash table of queues assigned to a matrix mdev
 * @matrix_mdev: the matrix mdev
 * @apqn: The APQN of a queue device
 *
 * Return: the pointer to the vfio_ap_queue struct representing the queue or
 *	   NULL if the queue is not assigned to @matrix_mdev
 */
static struct vfio_ap_queue *vfio_ap_mdev_get_queue(
					struct ap_matrix_mdev *matrix_mdev,
					int apqn)
{
	struct vfio_ap_queue *q;

	hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
			       apqn) {
		if (q && q->apqn == apqn)
			return q;
	}

	return NULL;
}

/**
 * vfio_ap_wait_for_irqclear - clears the IR bit or gives up after 5 tries
 * @apqn: The AP Queue number
 *
 * Checks the IRQ bit for the status of this APQN using ap_tapq.
 * Returns when ap_tapq succeeds and the bit is clear, or when ap_tapq fails
 * with an invalid, deconfigured or checkstopped AP.
 * Otherwise retries up to 5 times after waiting 20ms.
 */
static void vfio_ap_wait_for_irqclear(int apqn)
{
	struct ap_queue_status status;
	int retry = 5;

	do {
		status = ap_tapq(apqn, NULL);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			if (!status.irq_enabled)
				return;
			fallthrough;
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		default:
			WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
				  status.response_code, apqn);
			return;
		}
	} while (--retry);

	WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
		  __func__, status.response_code, apqn);
}
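
/*
 * Editorial note: with 5 retries and a 20ms sleep per retry, the helper
 * above polls for at most 5 * 20ms = 100ms before warning that the IR bit
 * could not be cleared.
 */
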
/**
 * vfio_ap_free_aqic_resources - free vfio_ap_queue resources
 * @q: The vfio_ap_queue
 *
 * Unregisters the ISC in the GIB when the saved ISC is not invalid.
 * Unpins the guest's page holding the NIB when it exists.
 * Resets the saved_iova and saved_isc to invalid values.
 */
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
	if (!q)
		return;
	if (q->saved_isc != VFIO_AP_ISC_INVALID &&
	    !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
		kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
		q->saved_isc = VFIO_AP_ISC_INVALID;
	}
	if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
		vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
		q->saved_iova = 0;
	}
}

/**
 * vfio_ap_irq_disable - disables and clears an ap_queue interrupt
 * @q: The vfio_ap_queue
 *
 * Uses ap_aqic to disable the interruption and, in case of success, reset
 * in progress or IRQ disable command already processed, calls
 * vfio_ap_wait_for_irqclear() to check for the IRQ bit to be clear
 * and calls vfio_ap_free_aqic_resources() to free the resources associated
 * with the AP interrupt handling.
 *
 * If the AP is busy, or a reset is in progress, retries after 20ms, up to
 * 5 times.
 *
 * Returns if ap_aqic fails with an invalid, deconfigured or checkstopped AP.
 *
 * Return: &struct ap_queue_status
 */
static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
	union ap_qirq_ctrl aqic_gisa = { .value = 0 };
	struct ap_queue_status status;
	int retries = 5;

	do {
		status = ap_aqic(q->apqn, aqic_gisa, 0);
		switch (status.response_code) {
		case AP_RESPONSE_OTHERWISE_CHANGED:
		case AP_RESPONSE_NORMAL:
			vfio_ap_wait_for_irqclear(q->apqn);
			goto end_free;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
		default:
			/* All cases in default mean the AP is not operational */
			WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
				  status.response_code);
			goto end_free;
		}
	} while (retries--);

	WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
		  status.response_code);
end_free:
	vfio_ap_free_aqic_resources(q);
	return status;
}

/**
 * vfio_ap_validate_nib - validate a notification indicator byte (nib) address.
 *
 * @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction.
 * @nib: the location for storing the nib address.
 *
 * When the PQAP(AQIC) instruction is executed, general register 2 contains the
 * address of the notification indicator byte (nib) used for IRQ notification.
 * This function parses and validates the nib from gr2.
 *
 * Return: returns zero if the nib address is valid; otherwise, returns
 *	   -EINVAL.
 */
static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
{
	*nib = vcpu->run->s.regs.gprs[2];

	if (!*nib)
		return -EINVAL;
	if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
		return -EINVAL;

	return 0;
}

/**
 * ensure_nib_shared() - Ensure the address of the NIB is secure and shared
 * @addr: the physical (absolute) address of the NIB
 *
 * This function checks whether the NIB page, which has been pinned with
 * vfio_pin_pages(), is a shared page belonging to a secure guest.
 *
 * It will call uv_pin_shared() on it; if the page was already pinned shared
 * (i.e. if the NIB belongs to a secure guest and is shared), then 0
 * (success) is returned. If the NIB was not shared, vfio_pin_pages() had
 * exported it and now it does not belong to the secure guest anymore. In
 * that case, an error is returned.
 *
 * Context: the NIB (at physical address @addr) has to be pinned with
 *	    vfio_pin_pages() before calling this function.
 *
 * Return: 0 in case of success, otherwise an error < 0.
 */
static int ensure_nib_shared(unsigned long addr)
{
	/*
	 * The nib has to be located in shared storage since guest and
	 * host access it. vfio_pin_pages() will do a pin shared and
	 * if that fails (possibly because it's not a shared page) it
	 * calls export. We try to do a second pin shared here so that
	 * the UV gives us an error code if we try to pin a non-shared
	 * page.
	 *
	 * If the page is already pinned shared the UV will return a success.
	 */
	return uv_pin_shared(addr);
}

/**
 * vfio_ap_irq_enable - Enable Interruption for an APQN
 *
 * @q:	  the vfio_ap_queue holding AQIC parameters
 * @isc:  the guest ISC to register with the GIB interface
 * @vcpu: the vcpu object containing the registers specifying the parameters
 *	  passed to the PQAP(AQIC) instruction.
 *
 * Pin the NIB saved in *q
 * Register the guest ISC with the GIB interface and retrieve the
 * host ISC to issue the host side PQAP/AQIC
 *
 * status.response_code may be set to AP_RESPONSE_INVALID_ADDRESS in case the
 * vfio_pin_pages or kvm_s390_gisc_register failed.
 *
 * Otherwise return the ap_queue_status returned by the ap_aqic();
 * all retry handling will be done by the guest.
 *
 * Return: &struct ap_queue_status
 */
static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
						 int isc,
						 struct kvm_vcpu *vcpu)
{
	union ap_qirq_ctrl aqic_gisa = { .value = 0 };
	struct ap_queue_status status = {};
	struct kvm_s390_gisa *gisa;
	struct page *h_page;
	int nisc;
	struct kvm *kvm;
	phys_addr_t h_nib;
	dma_addr_t nib;
	int ret;

	/* Verify that the notification indicator byte address is valid */
	if (vfio_ap_validate_nib(vcpu, &nib)) {
		VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%pad, apqn=%#04x\n",
				 __func__, &nib, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	/* The pin will probably be successful even if the NIB was not shared */
	ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
			     IOMMU_READ | IOMMU_WRITE, &h_page);
	switch (ret) {
	case 1:
		break;
	default:
		VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d,"
				 "nib=%pad, apqn=%#04x\n",
				 __func__, ret, &nib, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	kvm = q->matrix_mdev->kvm;
	gisa = kvm->arch.gisa_int.origin;

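	/*
	 * Editorial note: the guest's NIB address is translated to a host
	 * physical address below by combining the physical address of the
	 * pinned page with the NIB's offset within that page. For example
	 * (hypothetical values, 4K pages), a NIB at guest address
	 * 0x12345678 keeps its page offset 0x678 within the pinned host
	 * page.
	 */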
	h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
	aqic_gisa.gisc = isc;

	/* NIB in non-shared storage is a rc 6 for PV guests */
	if (kvm_s390_pv_cpu_is_protected(vcpu) &&
	    ensure_nib_shared(h_nib & PAGE_MASK)) {
		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	nisc = kvm_s390_gisc_register(kvm, isc);
	if (nisc < 0) {
		VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
				 __func__, nisc, isc, q->apqn);

		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	aqic_gisa.isc = nisc;
	aqic_gisa.ir = 1;
	aqic_gisa.gisa = virt_to_phys(gisa) >> 4;

	status = ap_aqic(q->apqn, aqic_gisa, h_nib);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* See if we did clear older IRQ configuration */
		vfio_ap_free_aqic_resources(q);
		q->saved_iova = nib;
		q->saved_isc = isc;
		break;
	case AP_RESPONSE_OTHERWISE_CHANGED:
		/* We could not modify IRQ settings: clear new configuration */
		ret = kvm_s390_gisc_unregister(kvm, isc);
		if (ret)
			VFIO_AP_DBF_WARN("%s: kvm_s390_gisc_unregister: rc=%d isc=%d, apqn=%#04x\n",
					 __func__, ret, isc, q->apqn);
		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		break;
	default:
		pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
			status.response_code);
		vfio_ap_irq_disable(q);
		break;
	}

	if (status.response_code != AP_RESPONSE_NORMAL) {
		VFIO_AP_DBF_WARN("%s: PQAP(AQIC) failed with status=%#02x: "
				 "zone=%#x, ir=%#x, gisc=%#x, f=%#x,"
				 "gisa=%#x, isc=%#x, apqn=%#04x\n",
				 __func__, status.response_code,
				 aqic_gisa.zone, aqic_gisa.ir, aqic_gisa.gisc,
				 aqic_gisa.gf, aqic_gisa.gisa, aqic_gisa.isc,
				 q->apqn);
	}

	return status;
}

/**
 * vfio_ap_le_guid_to_be_uuid - convert a little endian guid array into an array
 *				of big endian elements that can be passed by
 *				value to an s390dbf sprintf event function to
 *				format a UUID string.
 *
 * @guid: the object containing the little endian guid
 * @uuid: a six-element array of long values that can be passed by value as
 *	  arguments for a formatting string specifying a UUID.
 *
 * The S390 Debug Feature (s390dbf) allows the use of "%s" in the sprintf
 * event functions only if the memory for the passed string is available as
 * long as the debug feature exists. Since a mediated device can be removed
 * at any time, its name cannot be used because %s passes the reference to
 * the string in memory and the reference will go stale once the device is
 * removed.
 *
 * The s390dbf string formatting function allows a maximum of 9 arguments for a
 * message to be displayed in the 'sprintf' view. In order to use the bytes
 * comprising the mediated device's UUID to display the mediated device name,
 * they will have to be converted into an array whose elements can be passed by
 * value to sprintf. For example:
 *
 * guid array: { 83, 78, 17, 62, bb, f1, f0, 47, 91, 4d, 32, a2, 2e, 3a, 88, 04 }
 * mdev name: 62177883-f1bb-47f0-914d-32a22e3a8804
 * array returned: { 62177883, f1bb, 47f0, 914d, 32a2, 2e3a8804 }
 * formatting string: "%08lx-%04lx-%04lx-%04lx-%04lx%08lx"
 */
static void vfio_ap_le_guid_to_be_uuid(guid_t *guid, unsigned long *uuid)
{
	/*
	 * The input guid is ordered in little endian, so it needs to be
	 * reordered for displaying a UUID as a string. This specifies the
	 * guid indices in proper order.
	 */
	uuid[0] = le32_to_cpup((__le32 *)guid);
	uuid[1] = le16_to_cpup((__le16 *)&guid->b[4]);
	uuid[2] = le16_to_cpup((__le16 *)&guid->b[6]);
	uuid[3] = *((__u16 *)&guid->b[8]);
	uuid[4] = *((__u16 *)&guid->b[10]);
	uuid[5] = *((__u32 *)&guid->b[12]);
}
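
/*
 * Illustrative use (mirrors handle_pqap() below): the six elements are
 * passed by value to an s390dbf sprintf event, e.g.:
 *
 *	vfio_ap_le_guid_to_be_uuid(&matrix_mdev->mdev->uuid, uuid);
 *	VFIO_AP_DBF_WARN("mdev %08lx-%04lx-%04lx-%04lx-%04lx%08lx ...",
 *			 uuid[0], uuid[1], uuid[2], uuid[3], uuid[4],
 *			 uuid[5]);
 */
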
/**
 * handle_pqap - PQAP instruction callback
 *
 * @vcpu: The vcpu on which we received the PQAP instruction
 *
 * Get the general register contents to initialize internal variables.
 * REG[0]: APQN
 * REG[1]: IR and ISC
 * REG[2]: NIB
 *
 * Response.status may be set to one of the following response codes:
 * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
 * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
 * - AP_RESPONSE_NORMAL (0): in case of success
 * Check vfio_ap_irq_enable() and vfio_ap_irq_disable() for other possible RC.
 * We take the matrix_dev lock to ensure serialization on queues and
 * mediated device access.
 *
 * Return: 0 if we could handle the request inside KVM.
 * Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	uint64_t status;
	uint16_t apqn;
	unsigned long uuid[6];
	struct vfio_ap_queue *q;
	struct ap_queue_status qstatus = {
			       .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
	struct ap_matrix_mdev *matrix_mdev;

	apqn = vcpu->run->s.regs.gprs[0] & 0xffff;

	/* If we do not use the AIV facility just go to userland */
	if (!(vcpu->arch.sie_block->eca & ECA_AIV)) {
		VFIO_AP_DBF_WARN("%s: AIV facility not installed: apqn=0x%04x, eca=0x%04x\n",
				 __func__, apqn, vcpu->arch.sie_block->eca);

		return -EOPNOTSUPP;
	}

	mutex_lock(&matrix_dev->mdevs_lock);

	if (!vcpu->kvm->arch.crypto.pqap_hook) {
		VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n",
				 __func__, apqn);

		goto out_unlock;
	}

	matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
				   struct ap_matrix_mdev, pqap_hook);

	/* If there is no guest using the mdev, there is nothing to do */
	if (!matrix_mdev->kvm) {
		vfio_ap_le_guid_to_be_uuid(&matrix_mdev->mdev->uuid, uuid);
		VFIO_AP_DBF_WARN("%s: mdev %08lx-%04lx-%04lx-%04lx-%04lx%08lx not in use: apqn=0x%04x\n",
				 __func__, uuid[0], uuid[1], uuid[2],
				 uuid[3], uuid[4], uuid[5], apqn);
		goto out_unlock;
	}

	q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
	if (!q) {
		VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n",
				 __func__, AP_QID_CARD(apqn),
				 AP_QID_QUEUE(apqn));
		goto out_unlock;
	}

	status = vcpu->run->s.regs.gprs[1];

	/* If IR bit(16) is set we enable the interrupt */
	if ((status >> (63 - 16)) & 0x01)
		qstatus = vfio_ap_irq_enable(q, status & 0x07, vcpu);
	else
		qstatus = vfio_ap_irq_disable(q);

out_unlock:
	memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
	vcpu->run->s.regs.gprs[1] >>= 32;
	mutex_unlock(&matrix_dev->mdevs_lock);
	return 0;
}
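
/*
 * Editorial note on the bit numbering used above: AP instructions number
 * bits big-endian style, with bit 0 being the most significant bit of the
 * 64-bit register. Hence the IR bit (bit 16 of GR1) is tested with
 * (status >> (63 - 16)) & 0x01, the ISC occupies the three least
 * significant bits (status & 0x07), and the 4-byte ap_queue_status is
 * memcpy'd into the leftmost word of GR1 and then shifted right by 32 so
 * the guest sees it in bits 32-63.
 */
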
static void vfio_ap_matrix_init(struct ap_config_info *info,
				struct ap_matrix *matrix)
{
	matrix->apm_max = info->apxa ? info->na : 63;
	matrix->aqm_max = info->apxa ? info->nd : 15;
	matrix->adm_max = info->apxa ? info->nd : 15;
}
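
/*
 * Editorial note: without the APXA facility the masks are limited to 64
 * adapters (APM bits 0-63) and 16 domains (AQM/ADM bits 0-15), which is
 * where the fallback values 63 and 15 above come from.
 */
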
static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
{
	if (matrix_mdev->kvm)
		kvm_arch_crypto_set_masks(matrix_mdev->kvm,
					  matrix_mdev->shadow_apcb.apm,
					  matrix_mdev->shadow_apcb.aqm,
					  matrix_mdev->shadow_apcb.adm);
}

static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
{
	DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS);

	bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS);
	bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm,
		   (unsigned long *)matrix_dev->info.adm, AP_DOMAINS);

	return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm,
			     AP_DOMAINS);
}

static bool _queue_passable(struct vfio_ap_queue *q)
{
	if (!q)
		return false;

	switch (q->reset_status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
		return true;
	default:
		return false;
	}
}
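
/*
 * Editorial note (an inference from the reset handling in this file, not
 * upstream text): a queue whose last reset ended in AP_RESPONSE_DECONFIGURED
 * or AP_RESPONSE_CHECKSTOPPED is still considered passable because the
 * guest will simply observe the queue in that same non-operational state;
 * only an unfinished or failed reset makes the queue unsafe to pass through.
 */
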
/*
 * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
 *				to ensure no queue devices are passed through to
 *				the guest that are not bound to the vfio_ap
 *				device driver.
 *
 * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
 * @apm_filtered: a 256-bit bitmap for storing the APIDs filtered from the
 *		  guest's AP configuration that are still in the host's AP
 *		  configuration.
 *
 * Note: If an APQN references a queue device that is not bound to the vfio_ap
 *	 driver, its APID will be filtered from the guest's APCB. The matrix
 *	 structure precludes filtering an individual APQN, so its APID will be
 *	 filtered. Consequently, all queues associated with the adapter that
 *	 are in the host's AP configuration must be reset. If queues are
 *	 subsequently made available again to the guest, they should re-appear
 *	 in a reset state.
 *
 * Return: a boolean value indicating whether the KVM guest's APCB was changed
 *	   by the filtering or not.
 */
static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
				       unsigned long *apm_filtered)
{
	unsigned long apid, apqi, apqn;
	DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
	DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);

	bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
	bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
	bitmap_clear(apm_filtered, 0, AP_DEVICES);

	/*
	 * Copy the adapters, domains and control domains to the shadow_apcb
	 * from the matrix mdev, but only those that are assigned to the host's
	 * AP configuration.
	 */
	bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm,
		   (unsigned long *)matrix_dev->info.apm, AP_DEVICES);
	bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
		   (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);

	for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) {
		for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm,
				     AP_DOMAINS) {
			/*
			 * If the APQN is not bound to the vfio_ap device
			 * driver, then we can't assign it to the guest's
			 * AP configuration. The AP architecture won't
			 * allow filtering of a single APQN, so let's filter
			 * the APID since an adapter represents a physical
			 * hardware device.
			 */
			apqn = AP_MKQID(apid, apqi);
			if (!_queue_passable(vfio_ap_mdev_get_queue(matrix_mdev, apqn))) {
				clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);

				/*
				 * If the adapter was previously plugged into
				 * the guest, let's let the caller know that
				 * the APID was filtered.
				 */
				if (test_bit_inv(apid, prev_shadow_apm))
					set_bit_inv(apid, apm_filtered);

				break;
			}
		}
	}

	return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm,
			     AP_DEVICES) ||
	       !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm,
			     AP_DOMAINS);
}

static int vfio_ap_mdev_init_dev(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	matrix_mdev->mdev = to_mdev_device(vdev->dev);
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
	matrix_mdev->pqap_hook = handle_pqap;
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
	hash_init(matrix_mdev->qtable.queues);

	return 0;
}

static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev;
	int ret;

	matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev,
					&vfio_ap_matrix_dev_ops);
	if (IS_ERR(matrix_mdev))
		return PTR_ERR(matrix_mdev);

	ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
	if (ret)
		goto err_put_vdev;
	matrix_mdev->req_trigger = NULL;
	dev_set_drvdata(&mdev->dev, matrix_mdev);
	mutex_lock(&matrix_dev->mdevs_lock);
	list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
	mutex_unlock(&matrix_dev->mdevs_lock);
	return 0;

err_put_vdev:
	vfio_put_device(&matrix_mdev->vdev);
	return ret;
}
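
/*
 * Illustrative host usage (hypothetical UUID): an mdev of the "passthrough"
 * type declared at the top of this file is created through the mdev sysfs
 * interface, which ends up in vfio_ap_mdev_probe(), e.g.:
 *
 *	uuidgen > uuid; UUID=$(cat uuid)
 *	echo $UUID > /sys/devices/vfio_ap/matrix/mdev_supported_types/\
 *	vfio_ap-passthrough/create
 */
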
static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
				    struct vfio_ap_queue *q)
{
	if (!q || vfio_ap_mdev_get_queue(matrix_mdev, q->apqn))
		return;

	q->matrix_mdev = matrix_mdev;
	hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
}

static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
{
	struct vfio_ap_queue *q;

	q = vfio_ap_find_queue(apqn);
	vfio_ap_mdev_link_queue(matrix_mdev, q);
}

static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
{
	hash_del(&q->mdev_qnode);
}

static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
{
	q->matrix_mdev = NULL;
}

static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
{
	struct vfio_ap_queue *q;
	unsigned long apid, apqi;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
		for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
				     AP_DOMAINS) {
			q = vfio_ap_mdev_get_queue(matrix_mdev,
						   AP_MKQID(apid, apqi));
			if (q)
				q->matrix_mdev = NULL;
		}
	}
}

static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&matrix_mdev->vdev);

	mutex_lock(&matrix_dev->guests_lock);
	mutex_lock(&matrix_dev->mdevs_lock);
	vfio_ap_mdev_reset_queues(matrix_mdev);
	vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
	list_del(&matrix_mdev->node);
	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_dev->guests_lock);
	vfio_put_device(&matrix_mdev->vdev);
}

#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
			 "already assigned to %s"

static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
					 unsigned long *apm,
					 unsigned long *aqm)
{
	unsigned long apid, apqi;
	const struct device *dev = mdev_dev(matrix_mdev->mdev);
	const char *mdev_name = dev_name(dev);

	for_each_set_bit_inv(apid, apm, AP_DEVICES)
		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
			dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
}

/**
 * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
 *
 * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
 * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
 *
 * Verifies that each APQN derived from the Cartesian product of a bitmap of
 * AP adapter IDs and AP queue indexes is not configured for any matrix
 * mediated device. AP queue sharing is not allowed.
 *
 * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
 */
static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
					  unsigned long *mdev_aqm)
{
	struct ap_matrix_mdev *matrix_mdev;
	DECLARE_BITMAP(apm, AP_DEVICES);
	DECLARE_BITMAP(aqm, AP_DOMAINS);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		/*
		 * If the input apm and aqm are fields of the matrix_mdev
		 * object, then move on to the next matrix_mdev.
		 */
		if (mdev_apm == matrix_mdev->matrix.apm &&
		    mdev_aqm == matrix_mdev->matrix.aqm)
			continue;

		memset(apm, 0, sizeof(apm));
		memset(aqm, 0, sizeof(aqm));

		/*
		 * We work on full longs, as we can only exclude the leftover
		 * bits in non-inverse order. The leftover is all zeros.
		 */
		if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
				AP_DEVICES))
			continue;

		if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
				AP_DOMAINS))
			continue;

		vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);

		return -EADDRINUSE;
	}

	return 0;
}

/**
 * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are
 *				 not reserved for the default zcrypt driver and
 *				 are not assigned to another mdev.
 *
 * @matrix_mdev: the mdev to which the APQNs being validated are assigned.
 *
 * Return: One of the following values:
 * o -EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the
 *   zcrypt default driver.
 * o -EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another
 *   mdev.
 * o A zero indicating validation succeeded.
 */
static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
{
	if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm,
					       matrix_mdev->matrix.aqm))
		return -EADDRNOTAVAIL;

	return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
					      matrix_mdev->matrix.aqm);
}

static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
				      unsigned long apid)
{
	unsigned long apqi;

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS)
		vfio_ap_mdev_link_apqn(matrix_mdev,
				       AP_MKQID(apid, apqi));
}

static void collect_queues_to_reset(struct ap_matrix_mdev *matrix_mdev,
				    unsigned long apid,
				    struct list_head *qlist)
{
	struct vfio_ap_queue *q;
	unsigned long apqi;

	for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS) {
		q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
		if (q)
			list_add_tail(&q->reset_qnode, qlist);
	}
}

static void reset_queues_for_apid(struct ap_matrix_mdev *matrix_mdev,
				  unsigned long apid)
{
	struct list_head qlist;

	INIT_LIST_HEAD(&qlist);
	collect_queues_to_reset(matrix_mdev, apid, &qlist);
	vfio_ap_mdev_reset_qlist(&qlist);
}

static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev,
				  unsigned long *apm_reset)
{
	struct list_head qlist;
	unsigned long apid;

	if (bitmap_empty(apm_reset, AP_DEVICES))
		return 0;

	INIT_LIST_HEAD(&qlist);

	for_each_set_bit_inv(apid, apm_reset, AP_DEVICES)
		collect_queues_to_reset(matrix_mdev, apid, &qlist);

	return vfio_ap_mdev_reset_qlist(&qlist);
}

/**
 * assign_adapter_store - parses the APID from @buf and sets the
 *			  corresponding bit in the mediated matrix device's APM
 *
 * @dev:   the matrix device
 * @attr:  the mediated matrix device's assign_adapter attribute
 * @buf:   a buffer containing the AP adapter number (APID) to
 *	   be assigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 *	   returns one of the following errors:
 *
 *	1. -EINVAL
 *	   The APID is not a valid number
 *
 *	2. -ENODEV
 *	   The APID exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APQIs have yet been assigned, the APID is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is being used by another mediated
 *	   matrix device
 */
static ssize_t assign_adapter_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
		ret = count;
		goto done;
	}

	set_bit_inv(apid, matrix_mdev->matrix.apm);

	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (ret) {
		clear_bit_inv(apid, matrix_mdev->matrix.apm);
		goto done;
	}

	vfio_ap_mdev_link_adapter(matrix_mdev, apid);

	if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		reset_queues_for_apids(matrix_mdev, apm_filtered);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);

	return ret;
}
static DEVICE_ATTR_WO(assign_adapter);
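
/*
 * Illustrative sysfs usage (hypothetical mdev UUID $UUID): assigning
 * adapter 5 to the mdev writes its APID to the attribute defined above,
 * and the reverse operation uses the unassign_adapter attribute below:
 *
 *	echo 5 > /sys/devices/vfio_ap/matrix/$UUID/assign_adapter
 *	echo 5 > /sys/devices/vfio_ap/matrix/$UUID/unassign_adapter
 */
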
static struct vfio_ap_queue
*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev,
			     unsigned long apid, unsigned long apqi)
{
	struct vfio_ap_queue *q = NULL;

	q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
	/* If the queue is assigned to the matrix mdev, unlink it. */
	if (q)
		vfio_ap_unlink_queue_fr_mdev(q);

	return q;
}

/**
 * vfio_ap_mdev_unlink_adapter - unlink all queues associated with unassigned
 *				 adapter from the matrix mdev to which the
 *				 adapter was assigned.
 * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
 * @apid: the APID of the unassigned adapter.
 * @qlist: list for storing queues associated with unassigned adapter that
 *	   need to be reset.
 */
static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
					unsigned long apid,
					struct list_head *qlist)
{
	unsigned long apqi;
	struct vfio_ap_queue *q;

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
		q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

		if (q && qlist) {
			if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
			    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
				list_add_tail(&q->reset_qnode, qlist);
		}
	}
}

static void vfio_ap_mdev_hot_unplug_adapters(struct ap_matrix_mdev *matrix_mdev,
					     unsigned long *apids)
{
	struct vfio_ap_queue *q, *tmpq;
	struct list_head qlist;
	unsigned long apid;
	bool apcb_update = false;

	INIT_LIST_HEAD(&qlist);

	for_each_set_bit_inv(apid, apids, AP_DEVICES) {
		vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist);

		if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
			clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
			apcb_update = true;
		}
	}

	/* Only update apcb if needed to avoid impacting guest */
	if (apcb_update)
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	vfio_ap_mdev_reset_qlist(&qlist);

	list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
		vfio_ap_unlink_mdev_fr_queue(q);
		list_del(&q->reset_qnode);
	}
}
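
/*
 * Editorial note: the order above matters. The queues are reset while they
 * are still linked to the mdev because the reset path may need
 * q->matrix_mdev (e.g. to unpin a NIB page or unregister an ISC); only
 * after the resets complete are the queues unlinked from the mdev.
 */
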
static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
					    unsigned long apid)
{
	DECLARE_BITMAP(apids, AP_DEVICES);

	bitmap_zero(apids, AP_DEVICES);
	set_bit_inv(apid, apids);
	vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, apids);
}

/**
 * unassign_adapter_store - parses the APID from @buf and clears the
 *			    corresponding bit in the mediated matrix device's
 *			    APM
 *
 * @dev:   the matrix device
 * @attr:  the mediated matrix device's unassign_adapter attribute
 * @buf:   a buffer containing the adapter number (APID) to be unassigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 *	   returns one of the following errors:
 *	   -EINVAL if the APID is not a number
 *	   -ENODEV if the APID exceeds the maximum value configured for the
 *		   system
 */
static ssize_t unassign_adapter_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
	vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);

static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev,
				     unsigned long apqi)
{
	unsigned long apid;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES)
		vfio_ap_mdev_link_apqn(matrix_mdev,
				       AP_MKQID(apid, apqi));
}

/**
 * assign_domain_store - parses the APQI from @buf and sets the
 *			 corresponding bit in the mediated matrix device's AQM
 *
 * @dev:   the matrix device
 * @attr:  the mediated matrix device's assign_domain attribute
 * @buf:   a buffer containing the AP queue index (APQI) of the domain to
 *	   be assigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise
 *	   returns one of the following errors:
 *
 *	1. -EINVAL
 *	   The APQI is not a valid number
 *
 *	2. -ENODEV
 *	   The APQI exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APIDs have yet been assigned, the APQI is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is being used by another mediated
 *	   matrix device
 */
static ssize_t assign_domain_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
		ret = count;
		goto done;
	}

	set_bit_inv(apqi, matrix_mdev->matrix.aqm);

	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (ret) {
		clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
		goto done;
	}

	vfio_ap_mdev_link_domain(matrix_mdev, apqi);

	if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		reset_queues_for_apids(matrix_mdev, apm_filtered);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);

	return ret;
}
static DEVICE_ATTR_WO(assign_domain);

static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
				       unsigned long apqi,
				       struct list_head *qlist)
{
	unsigned long apid;
	struct vfio_ap_queue *q;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
		q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

		if (q && qlist) {
			if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
			    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
				list_add_tail(&q->reset_qnode, qlist);
		}
	}
}

static void vfio_ap_mdev_hot_unplug_domains(struct ap_matrix_mdev *matrix_mdev,
					    unsigned long *apqis)
{
	struct vfio_ap_queue *q, *tmpq;
	struct list_head qlist;
	unsigned long apqi;
	bool apcb_update = false;

	INIT_LIST_HEAD(&qlist);

	for_each_set_bit_inv(apqi, apqis, AP_DOMAINS) {
		vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist);

		if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
			clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
			apcb_update = true;
		}
	}

	/* Only update apcb if needed to avoid impacting guest */
	if (apcb_update)
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	vfio_ap_mdev_reset_qlist(&qlist);

	list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
		vfio_ap_unlink_mdev_fr_queue(q);
		list_del(&q->reset_qnode);
	}
}

static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
					   unsigned long apqi)
{
	DECLARE_BITMAP(apqis, AP_DOMAINS);

	bitmap_zero(apqis, AP_DOMAINS);
	set_bit_inv(apqi, apqis);
	vfio_ap_mdev_hot_unplug_domains(matrix_mdev, apqis);
}

/**
 * unassign_domain_store - parses the APQI from @buf and clears the
 *			   corresponding bit in the mediated matrix device's AQM
 *
 * @dev:   the matrix device
 * @attr:  the mediated matrix device's unassign_domain attribute
 * @buf:   a buffer containing the AP queue index (APQI) of the domain to
 *	   be unassigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise,
 *	   returns one of the following errors:
 *	   -EINVAL if the APQI is not a number
 *	   -ENODEV if the APQI exceeds the maximum value configured for the
 *		   system
 */
static ssize_t unassign_domain_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
	vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
	ret = count;

done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_domain);

/**
 * assign_control_domain_store - parses the domain ID from @buf and sets
 *				 the corresponding bit in the mediated matrix
 *				 device's ADM
 *
 * @dev:   the matrix device
 * @attr:  the mediated matrix device's assign_control_domain attribute
 * @buf:   a buffer containing the domain ID to be assigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 *	   returns one of the following errors:
 *	   -EINVAL if the ID is not a number
 *	   -ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t assign_control_domain_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	int ret;
	unsigned long id;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &id);
	if (ret)
		goto done;

	if (id > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
		ret = count;
		goto done;
	}

	/* Set the bit in the ADM (bitmask) corresponding to the AP control
	 * domain number (id). The bits in the mask, from most significant to
	 * least significant, correspond to IDs 0 through one less than the
	 * number of control domains that can be assigned.
	 */
	set_bit_inv(id, matrix_mdev->matrix.adm);
	if (vfio_ap_mdev_filter_cdoms(matrix_mdev))
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);

/**
 * unassign_control_domain_store - parses the domain ID from @buf and
 *				   clears the corresponding bit in the mediated
 *				   matrix device's ADM
 *
 * @dev:   the matrix device
 * @attr:  the mediated matrix device's unassign_control_domain attribute
 * @buf:   a buffer containing the domain ID to be unassigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 *	   returns one of the following errors:
 *	   -EINVAL if the ID is not a number
 *	   -ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t unassign_control_domain_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	int ret;
	unsigned long domid;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &domid);
	if (ret)
		goto done;

	if (domid > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv(domid, matrix_mdev->matrix.adm);

	if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
		clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);

static ssize_t control_domains_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	unsigned long id;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	unsigned long max_domid = matrix_mdev->matrix.adm_max;
	int nchars = 0;

	mutex_lock(&matrix_dev->mdevs_lock);
	for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1)
		nchars += sysfs_emit_at(buf, nchars, "%04lx\n", id);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(control_domains);

static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
	unsigned long apid;
	unsigned long apqi;
	unsigned long apid1;
	unsigned long apqi1;
	unsigned long napm_bits = matrix->apm_max + 1;
	unsigned long naqm_bits = matrix->aqm_max + 1;
	int nchars = 0;

	apid1 = find_first_bit_inv(matrix->apm, napm_bits);
	apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);

	if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
			for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits)
				nchars += sysfs_emit_at(buf, nchars, "%02lx.%04lx\n", apid, apqi);
		}
	} else if (apid1 < napm_bits) {
		for_each_set_bit_inv(apid, matrix->apm, napm_bits)
			nchars += sysfs_emit_at(buf, nchars, "%02lx.\n", apid);
	} else if (apqi1 < naqm_bits) {
		for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits)
			nchars += sysfs_emit_at(buf, nchars, ".%04lx\n", apqi);
	}

	return nchars;
}

static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	ssize_t nchars;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(matrix);

static ssize_t guest_matrix_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t nchars;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(guest_matrix);
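
/*
 * Illustrative output (hypothetical assignments): with adapters 5 and 6 and
 * domain 0x47 assigned, 'cat matrix' emits one APQN per line in the
 * %02lx.%04lx format used above:
 *
 *	05.0047
 *	06.0047
 */
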
static ssize_t write_ap_bitmap(unsigned long *bitmap, char *buf, int offset, char sep)
{
	return sysfs_emit_at(buf, offset, "0x%016lx%016lx%016lx%016lx%c",
			     bitmap[0], bitmap[1], bitmap[2], bitmap[3], sep);
}

static ssize_t ap_config_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	int idx = 0;

	idx += write_ap_bitmap(matrix_mdev->matrix.apm, buf, idx, ',');
	idx += write_ap_bitmap(matrix_mdev->matrix.aqm, buf, idx, ',');
	idx += write_ap_bitmap(matrix_mdev->matrix.adm, buf, idx, '\n');

	return idx;
}

/* Number of characters needed for a complete hex mask representing the bits in .. */
#define AP_DEVICES_STRLEN	(AP_DEVICES / 4 + 3)
#define AP_DOMAINS_STRLEN	(AP_DOMAINS / 4 + 3)
#define AP_CONFIG_STRLEN	(AP_DEVICES_STRLEN + 2 * AP_DOMAINS_STRLEN)
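
/*
 * Editorial arithmetic (assuming the usual AP_DEVICES and AP_DOMAINS of 256
 * bits each): a mask takes 256 / 4 = 64 hex digits plus 3 bytes for the
 * "0x" prefix and a separator, i.e. 67 bytes, so AP_CONFIG_STRLEN works out
 * to 3 * 67 = 201 bytes for the three comma/newline separated masks emitted
 * by write_ap_bitmap() above.
 */
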
static int parse_bitmap(char **strbufptr, unsigned long *bitmap, int nbits)
{
	char *curmask;

	curmask = strsep(strbufptr, ",\n");
	if (!curmask)
		return -EINVAL;

	bitmap_clear(bitmap, 0, nbits);
	return ap_hex2bitmap(curmask, bitmap, nbits);
}

static int ap_matrix_overflow_check(struct ap_matrix_mdev *matrix_mdev)
{
	unsigned long bit;

	for_each_set_bit_inv(bit, matrix_mdev->matrix.apm, AP_DEVICES) {
		if (bit > matrix_mdev->matrix.apm_max)
			return -ENODEV;
	}

	for_each_set_bit_inv(bit, matrix_mdev->matrix.aqm, AP_DOMAINS) {
		if (bit > matrix_mdev->matrix.aqm_max)
			return -ENODEV;
	}

	for_each_set_bit_inv(bit, matrix_mdev->matrix.adm, AP_DOMAINS) {
		if (bit > matrix_mdev->matrix.adm_max)
			return -ENODEV;
	}

	return 0;
}

static void ap_matrix_copy(struct ap_matrix *dst, struct ap_matrix *src)
{
	/* This check works around false positive gcc -Wstringop-overread */
	if (!src)
		return;

	bitmap_copy(dst->apm, src->apm, AP_DEVICES);
	bitmap_copy(dst->aqm, src->aqm, AP_DOMAINS);
	bitmap_copy(dst->adm, src->adm, AP_DOMAINS);
}

static ssize_t ap_config_store(struct device *dev, struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	struct ap_matrix m_new, m_old, m_added, m_removed;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	unsigned long newbit;
	char *newbuf, *rest;
	int rc = count;
	bool do_update;

	newbuf = kstrndup(buf, AP_CONFIG_STRLEN, GFP_KERNEL);
	if (!newbuf)
		return -ENOMEM;
	rest = newbuf;

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	/* Save old state */
	ap_matrix_copy(&m_old, &matrix_mdev->matrix);
	if (parse_bitmap(&rest, m_new.apm, AP_DEVICES) ||
	    parse_bitmap(&rest, m_new.aqm, AP_DOMAINS) ||
	    parse_bitmap(&rest, m_new.adm, AP_DOMAINS)) {
		rc = -EINVAL;
		goto out;
	}

	bitmap_andnot(m_removed.apm, m_old.apm, m_new.apm, AP_DEVICES);
	bitmap_andnot(m_removed.aqm, m_old.aqm, m_new.aqm, AP_DOMAINS);
	bitmap_andnot(m_added.apm, m_new.apm, m_old.apm, AP_DEVICES);
	bitmap_andnot(m_added.aqm, m_new.aqm, m_old.aqm, AP_DOMAINS);

	/* Need new bitmaps in matrix_mdev for validation */
	ap_matrix_copy(&matrix_mdev->matrix, &m_new);

	/* Ensure new state is valid, else undo new state */
	rc = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (rc) {
		ap_matrix_copy(&matrix_mdev->matrix, &m_old);
		goto out;
	}
	rc = ap_matrix_overflow_check(matrix_mdev);
	if (rc) {
		ap_matrix_copy(&matrix_mdev->matrix, &m_old);
		goto out;
	}
	rc = count;

	/* Need old bitmaps in matrix_mdev for unplug/unlink */
	ap_matrix_copy(&matrix_mdev->matrix, &m_old);

	/* Unlink removed adapters/domains */
	vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, m_removed.apm);
	vfio_ap_mdev_hot_unplug_domains(matrix_mdev, m_removed.aqm);

	/* Need new bitmaps in matrix_mdev for linking new adapters/domains */
	ap_matrix_copy(&matrix_mdev->matrix, &m_new);

	/* Link newly added adapters */
	for_each_set_bit_inv(newbit, m_added.apm, AP_DEVICES)
		vfio_ap_mdev_link_adapter(matrix_mdev, newbit);

	for_each_set_bit_inv(newbit, m_added.aqm, AP_DOMAINS)
		vfio_ap_mdev_link_domain(matrix_mdev, newbit);

	/* Filter resources not bound to vfio-ap */
	do_update = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered);
	do_update |= vfio_ap_mdev_filter_cdoms(matrix_mdev);

	/* Apply changes to shadow APCB if things changed */
	if (do_update) {
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		reset_queues_for_apids(matrix_mdev, apm_filtered);
	}
out:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);
	kfree(newbuf);
	return rc;
}
static DEVICE_ATTR_RW(ap_config);
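
/*
 * Illustrative sysfs usage (hypothetical masks matching the matrix example
 * above, i.e. adapters 5 and 6 and domain 0x47): the whole matrix can be
 * replaced in one write of three comma separated masks in the format
 * produced by ap_config_show():
 *
 *	echo 0x0600000000000000000000000000000000000000000000000000000000000000,\
 *	0x0000000000000000010000000000000000000000000000000000000000000000,\
 *	0x0000000000000000010000000000000000000000000000000000000000000000 \
 *	> /sys/devices/vfio_ap/matrix/$UUID/ap_config
 */
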
m_added.apm, AP_DEVICES) 1736 vfio_ap_mdev_link_adapter(matrix_mdev, newbit); 1737 1738 for_each_set_bit_inv(newbit, m_added.aqm, AP_DOMAINS) 1739 vfio_ap_mdev_link_domain(matrix_mdev, newbit); 1740 1741 /* filter resources not bound to vfio-ap */ 1742 do_update = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered); 1743 do_update |= vfio_ap_mdev_filter_cdoms(matrix_mdev); 1744 1745 /* Apply changes to shadow APCB if things changed */ 1746 if (do_update) { 1747 vfio_ap_mdev_update_guest_apcb(matrix_mdev); 1748 reset_queues_for_apids(matrix_mdev, apm_filtered); 1749 } 1750 out: 1751 release_update_locks_for_mdev(matrix_mdev); 1752 mutex_unlock(&ap_perms_mutex); 1753 kfree(newbuf); 1754 return rc; 1755 } 1756 static DEVICE_ATTR_RW(ap_config); 1757 1758 static struct attribute *vfio_ap_mdev_attrs[] = { 1759 &dev_attr_assign_adapter.attr, 1760 &dev_attr_unassign_adapter.attr, 1761 &dev_attr_assign_domain.attr, 1762 &dev_attr_unassign_domain.attr, 1763 &dev_attr_assign_control_domain.attr, 1764 &dev_attr_unassign_control_domain.attr, 1765 &dev_attr_ap_config.attr, 1766 &dev_attr_control_domains.attr, 1767 &dev_attr_matrix.attr, 1768 &dev_attr_guest_matrix.attr, 1769 NULL, 1770 }; 1771 1772 static struct attribute_group vfio_ap_mdev_attr_group = { 1773 .attrs = vfio_ap_mdev_attrs 1774 }; 1775 1776 static const struct attribute_group *vfio_ap_mdev_attr_groups[] = { 1777 &vfio_ap_mdev_attr_group, 1778 NULL 1779 }; 1780 1781 /** 1782 * vfio_ap_mdev_set_kvm - sets all data for @matrix_mdev that are needed 1783 * to manage AP resources for the guest whose state is represented by @kvm 1784 * 1785 * @matrix_mdev: a mediated matrix device 1786 * @kvm: reference to KVM instance 1787 * 1788 * Return: 0 if no other mediated matrix device has a reference to @kvm; 1789 * otherwise, returns -EPERM. 1790 */ 1791 static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev, 1792 struct kvm *kvm) 1793 { 1794 struct ap_matrix_mdev *m; 1795 1796 if (kvm->arch.crypto.crycbd) { 1797 down_write(&kvm->arch.crypto.pqap_hook_rwsem); 1798 kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook; 1799 up_write(&kvm->arch.crypto.pqap_hook_rwsem); 1800 1801 get_update_locks_for_kvm(kvm); 1802 1803 list_for_each_entry(m, &matrix_dev->mdev_list, node) { 1804 if (m != matrix_mdev && m->kvm == kvm) { 1805 release_update_locks_for_kvm(kvm); 1806 return -EPERM; 1807 } 1808 } 1809 1810 kvm_get_kvm(kvm); 1811 matrix_mdev->kvm = kvm; 1812 vfio_ap_mdev_update_guest_apcb(matrix_mdev); 1813 1814 release_update_locks_for_kvm(kvm); 1815 } 1816 1817 return 0; 1818 } 1819 1820 static void unmap_iova(struct ap_matrix_mdev *matrix_mdev, u64 iova, u64 length) 1821 { 1822 struct ap_queue_table *qtable = &matrix_mdev->qtable; 1823 struct vfio_ap_queue *q; 1824 int loop_cursor; 1825 1826 hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) { 1827 if (q->saved_iova >= iova && q->saved_iova < iova + length) 1828 vfio_ap_irq_disable(q); 1829 } 1830 } 1831 1832 static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova, 1833 u64 length) 1834 { 1835 struct ap_matrix_mdev *matrix_mdev = 1836 container_of(vdev, struct ap_matrix_mdev, vdev); 1837 1838 mutex_lock(&matrix_dev->mdevs_lock); 1839 1840 unmap_iova(matrix_mdev, iova, length); 1841 1842 mutex_unlock(&matrix_dev->mdevs_lock); 1843 } 1844 1845 /** 1846 * vfio_ap_mdev_unset_kvm - performs clean-up of resources no longer needed 1847 * by @matrix_mdev.
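* The guest's APCB masks are cleared, its queues are reset and the
* reference to the kvm object is released.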
1848 * 1849 * @matrix_mdev: a matrix mediated device 1850 */ 1851 static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev) 1852 { 1853 struct kvm *kvm = matrix_mdev->kvm; 1854 1855 if (kvm && kvm->arch.crypto.crycbd) { 1856 down_write(&kvm->arch.crypto.pqap_hook_rwsem); 1857 kvm->arch.crypto.pqap_hook = NULL; 1858 up_write(&kvm->arch.crypto.pqap_hook_rwsem); 1859 1860 get_update_locks_for_kvm(kvm); 1861 1862 kvm_arch_crypto_clear_masks(kvm); 1863 vfio_ap_mdev_reset_queues(matrix_mdev); 1864 kvm_put_kvm(kvm); 1865 matrix_mdev->kvm = NULL; 1866 1867 release_update_locks_for_kvm(kvm); 1868 } 1869 } 1870 1871 static struct vfio_ap_queue *vfio_ap_find_queue(int apqn) 1872 { 1873 struct ap_queue *queue; 1874 struct vfio_ap_queue *q = NULL; 1875 1876 queue = ap_get_qdev(apqn); 1877 if (!queue) 1878 return NULL; 1879 1880 if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver) 1881 q = dev_get_drvdata(&queue->ap_dev.device); 1882 1883 put_device(&queue->ap_dev.device); 1884 1885 return q; 1886 } 1887 1888 static int apq_status_check(int apqn, struct ap_queue_status *status) 1889 { 1890 switch (status->response_code) { 1891 case AP_RESPONSE_NORMAL: 1892 case AP_RESPONSE_DECONFIGURED: 1893 case AP_RESPONSE_CHECKSTOPPED: 1894 return 0; 1895 case AP_RESPONSE_RESET_IN_PROGRESS: 1896 case AP_RESPONSE_BUSY: 1897 return -EBUSY; 1898 case AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE: 1899 case AP_RESPONSE_ASSOC_FAILED: 1900 /* 1901 * These asynchronous response codes indicate a PQAP(AAPQ) 1902 * instruction to associate a secret with the guest failed. All 1903 * subsequent AP instructions will end with the asynchronous 1904 * response code until the AP queue is reset; so, let's return 1905 * a value indicating a reset needs to be performed again. 1906 */ 1907 return -EAGAIN; 1908 default: 1909 WARN(true, 1910 "failed to verify reset of queue %02x.%04x: TAPQ rc=%u\n", 1911 AP_QID_CARD(apqn), AP_QID_QUEUE(apqn), 1912 status->response_code); 1913 return -EIO; 1914 } 1915 } 1916 1917 #define WAIT_MSG "Waited %dms for reset of queue %02x.%04x (%u, %u, %u)" 1918 1919 static void apq_reset_check(struct work_struct *reset_work) 1920 { 1921 int ret = -EBUSY, elapsed = 0; 1922 struct ap_queue_status status; 1923 struct vfio_ap_queue *q; 1924 1925 q = container_of(reset_work, struct vfio_ap_queue, reset_work); 1926 memcpy(&status, &q->reset_status, sizeof(status)); 1927 while (true) { 1928 msleep(AP_RESET_INTERVAL); 1929 elapsed += AP_RESET_INTERVAL; 1930 status = ap_tapq(q->apqn, NULL); 1931 ret = apq_status_check(q->apqn, &status); 1932 if (ret == -EIO) 1933 return; 1934 if (ret == -EBUSY) { 1935 pr_notice_ratelimited(WAIT_MSG, elapsed, 1936 AP_QID_CARD(q->apqn), 1937 AP_QID_QUEUE(q->apqn), 1938 status.response_code, 1939 status.queue_empty, 1940 status.irq_enabled); 1941 } else { 1942 if (q->reset_status.response_code == AP_RESPONSE_RESET_IN_PROGRESS || 1943 q->reset_status.response_code == AP_RESPONSE_BUSY || 1944 q->reset_status.response_code == AP_RESPONSE_STATE_CHANGE_IN_PROGRESS || 1945 ret == -EAGAIN) { 1946 status = ap_zapq(q->apqn, 0); 1947 memcpy(&q->reset_status, &status, sizeof(status)); 1948 continue; 1949 } 1950 if (q->saved_isc != VFIO_AP_ISC_INVALID) 1951 vfio_ap_free_aqic_resources(q); 1952 break; 1953 } 1954 } 1955 } 1956 1957 static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q) 1958 { 1959 struct ap_queue_status status; 1960 1961 if (!q) 1962 return; 1963 status = ap_zapq(q->apqn, 0); 1964 memcpy(&q->reset_status, &status, sizeof(status)); 1965 switch 
(status.response_code) { 1966 case AP_RESPONSE_NORMAL: 1967 case AP_RESPONSE_RESET_IN_PROGRESS: 1968 case AP_RESPONSE_BUSY: 1969 case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS: 1970 /* 1971 * Let's verify whether the ZAPQ completed successfully on a work queue. 1972 */ 1973 queue_work(system_long_wq, &q->reset_work); 1974 break; 1975 case AP_RESPONSE_DECONFIGURED: 1976 case AP_RESPONSE_CHECKSTOPPED: 1977 vfio_ap_free_aqic_resources(q); 1978 break; 1979 default: 1980 WARN(true, 1981 "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n", 1982 AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn), 1983 status.response_code); 1984 } 1985 } 1986 1987 static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev) 1988 { 1989 int ret = 0, loop_cursor; 1990 struct vfio_ap_queue *q; 1991 1992 hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) 1993 vfio_ap_mdev_reset_queue(q); 1994 1995 hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) { 1996 flush_work(&q->reset_work); 1997 1998 if (q->reset_status.response_code) 1999 ret = -EIO; 2000 } 2001 2002 return ret; 2003 } 2004 2005 static int vfio_ap_mdev_reset_qlist(struct list_head *qlist) 2006 { 2007 int ret = 0; 2008 struct vfio_ap_queue *q; 2009 2010 list_for_each_entry(q, qlist, reset_qnode) 2011 vfio_ap_mdev_reset_queue(q); 2012 2013 list_for_each_entry(q, qlist, reset_qnode) { 2014 flush_work(&q->reset_work); 2015 2016 if (q->reset_status.response_code) 2017 ret = -EIO; 2018 } 2019 2020 return ret; 2021 } 2022 2023 static int vfio_ap_mdev_open_device(struct vfio_device *vdev) 2024 { 2025 struct ap_matrix_mdev *matrix_mdev = 2026 container_of(vdev, struct ap_matrix_mdev, vdev); 2027 2028 if (!vdev->kvm) 2029 return -EINVAL; 2030 2031 return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm); 2032 } 2033 2034 static void vfio_ap_mdev_close_device(struct vfio_device *vdev) 2035 { 2036 struct ap_matrix_mdev *matrix_mdev = 2037 container_of(vdev, struct ap_matrix_mdev, vdev); 2038 2039 vfio_ap_mdev_unset_kvm(matrix_mdev); 2040 } 2041 2042 static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count) 2043 { 2044 struct device *dev = vdev->dev; 2045 struct ap_matrix_mdev *matrix_mdev; 2046 2047 matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev); 2048 2049 if (matrix_mdev->req_trigger) { 2050 if (!(count % 10)) 2051 dev_notice_ratelimited(dev, 2052 "Relaying device request to user (#%u)\n", 2053 count); 2054 2055 eventfd_signal(matrix_mdev->req_trigger); 2056 } else if (count == 0) { 2057 dev_notice(dev, 2058 "No device request registered, blocked until released by user\n"); 2059 } 2060 } 2061 2062 static int vfio_ap_mdev_get_device_info(unsigned long arg) 2063 { 2064 unsigned long minsz; 2065 struct vfio_device_info info; 2066 2067 minsz = offsetofend(struct vfio_device_info, num_irqs); 2068 2069 if (copy_from_user(&info, (void __user *)arg, minsz)) 2070 return -EFAULT; 2071 2072 if (info.argsz < minsz) 2073 return -EINVAL; 2074 2075 info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET; 2076 info.num_regions = 0; 2077 info.num_irqs = VFIO_AP_NUM_IRQS; 2078 2079 return copy_to_user((void __user *)arg, &info, minsz) ? 
-EFAULT : 0; 2080 } 2081 2082 static ssize_t vfio_ap_get_irq_info(unsigned long arg) 2083 { 2084 unsigned long minsz; 2085 struct vfio_irq_info info; 2086 2087 minsz = offsetofend(struct vfio_irq_info, count); 2088 2089 if (copy_from_user(&info, (void __user *)arg, minsz)) 2090 return -EFAULT; 2091 2092 if (info.argsz < minsz || info.index >= VFIO_AP_NUM_IRQS) 2093 return -EINVAL; 2094 2095 switch (info.index) { 2096 case VFIO_AP_REQ_IRQ_INDEX: 2097 info.count = 1; 2098 info.flags = VFIO_IRQ_INFO_EVENTFD; 2099 break; 2100 default: 2101 return -EINVAL; 2102 } 2103 2104 return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; 2105 } 2106 2107 static int vfio_ap_irq_set_init(struct vfio_irq_set *irq_set, unsigned long arg) 2108 { 2109 int ret; 2110 size_t data_size; 2111 unsigned long minsz; 2112 2113 minsz = offsetofend(struct vfio_irq_set, count); 2114 2115 if (copy_from_user(irq_set, (void __user *)arg, minsz)) 2116 return -EFAULT; 2117 2118 ret = vfio_set_irqs_validate_and_prepare(irq_set, 1, VFIO_AP_NUM_IRQS, 2119 &data_size); 2120 if (ret) 2121 return ret; 2122 2123 if (!(irq_set->flags & VFIO_IRQ_SET_ACTION_TRIGGER)) 2124 return -EINVAL; 2125 2126 return 0; 2127 } 2128 2129 static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev, 2130 unsigned long arg) 2131 { 2132 s32 fd; 2133 void __user *data; 2134 unsigned long minsz; 2135 struct eventfd_ctx *req_trigger; 2136 2137 minsz = offsetofend(struct vfio_irq_set, count); 2138 data = (void __user *)(arg + minsz); 2139 2140 if (get_user(fd, (s32 __user *)data)) 2141 return -EFAULT; 2142 2143 if (fd == -1) { 2144 if (matrix_mdev->req_trigger) 2145 eventfd_ctx_put(matrix_mdev->req_trigger); 2146 matrix_mdev->req_trigger = NULL; 2147 } else if (fd >= 0) { 2148 req_trigger = eventfd_ctx_fdget(fd); 2149 if (IS_ERR(req_trigger)) 2150 return PTR_ERR(req_trigger); 2151 2152 if (matrix_mdev->req_trigger) 2153 eventfd_ctx_put(matrix_mdev->req_trigger); 2154 2155 matrix_mdev->req_trigger = req_trigger; 2156 } else { 2157 return -EINVAL; 2158 } 2159 2160 return 0; 2161 } 2162 2163 static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev, 2164 unsigned long arg) 2165 { 2166 int ret; 2167 struct vfio_irq_set irq_set; 2168 2169 ret = vfio_ap_irq_set_init(&irq_set, arg); 2170 if (ret) 2171 return ret; 2172 2173 switch (irq_set.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { 2174 case VFIO_IRQ_SET_DATA_EVENTFD: 2175 switch (irq_set.index) { 2176 case VFIO_AP_REQ_IRQ_INDEX: 2177 return vfio_ap_set_request_irq(matrix_mdev, arg); 2178 default: 2179 return -EINVAL; 2180 } 2181 default: 2182 return -EINVAL; 2183 } 2184 } 2185 2186 static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev, 2187 unsigned int cmd, unsigned long arg) 2188 { 2189 struct ap_matrix_mdev *matrix_mdev = 2190 container_of(vdev, struct ap_matrix_mdev, vdev); 2191 int ret; 2192 2193 mutex_lock(&matrix_dev->mdevs_lock); 2194 switch (cmd) { 2195 case VFIO_DEVICE_GET_INFO: 2196 ret = vfio_ap_mdev_get_device_info(arg); 2197 break; 2198 case VFIO_DEVICE_RESET: 2199 ret = vfio_ap_mdev_reset_queues(matrix_mdev); 2200 break; 2201 case VFIO_DEVICE_GET_IRQ_INFO: 2202 ret = vfio_ap_get_irq_info(arg); 2203 break; 2204 case VFIO_DEVICE_SET_IRQS: 2205 ret = vfio_ap_set_irqs(matrix_mdev, arg); 2206 break; 2207 default: 2208 ret = -EOPNOTSUPP; 2209 break; 2210 } 2211 mutex_unlock(&matrix_dev->mdevs_lock); 2212 2213 return ret; 2214 } 2215 2216 static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q) 2217 { 2218 struct ap_matrix_mdev *matrix_mdev; 2219 unsigned 
long apid = AP_QID_CARD(q->apqn); 2220 unsigned long apqi = AP_QID_QUEUE(q->apqn); 2221 2222 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { 2223 if (test_bit_inv(apid, matrix_mdev->matrix.apm) && 2224 test_bit_inv(apqi, matrix_mdev->matrix.aqm)) 2225 return matrix_mdev; 2226 } 2227 2228 return NULL; 2229 } 2230 2231 static ssize_t status_show(struct device *dev, 2232 struct device_attribute *attr, 2233 char *buf) 2234 { 2235 ssize_t nchars = 0; 2236 struct vfio_ap_queue *q; 2237 unsigned long apid, apqi; 2238 struct ap_matrix_mdev *matrix_mdev; 2239 struct ap_device *apdev = to_ap_dev(dev); 2240 2241 mutex_lock(&matrix_dev->mdevs_lock); 2242 q = dev_get_drvdata(&apdev->device); 2243 matrix_mdev = vfio_ap_mdev_for_queue(q); 2244 2245 /* If the queue is assigned to the matrix mediated device, then 2246 * determine whether it is passed through to a guest; otherwise, 2247 * indicate that it is unassigned. 2248 */ 2249 if (matrix_mdev) { 2250 apid = AP_QID_CARD(q->apqn); 2251 apqi = AP_QID_QUEUE(q->apqn); 2252 /* 2253 * If the queue is passed through to the guest, then indicate 2254 * that it is in use; otherwise, indicate that it is 2255 * merely assigned to a matrix mediated device. 2256 */ 2257 if (matrix_mdev->kvm && 2258 test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && 2259 test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) 2260 nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_IN_USE); 2261 else 2262 nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_ASSIGNED); 2263 } else { 2264 nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_UNASSIGNED); 2265 } 2266 2267 mutex_unlock(&matrix_dev->mdevs_lock); 2268 2269 return nchars; 2270 } 2271 2272 static DEVICE_ATTR_RO(status); 2273 2274 static struct attribute *vfio_queue_attrs[] = { 2275 &dev_attr_status.attr, 2276 NULL, 2277 }; 2278 2279 static const struct attribute_group vfio_queue_attr_group = { 2280 .attrs = vfio_queue_attrs, 2281 }; 2282 2283 static const struct vfio_device_ops vfio_ap_matrix_dev_ops = { 2284 .init = vfio_ap_mdev_init_dev, 2285 .open_device = vfio_ap_mdev_open_device, 2286 .close_device = vfio_ap_mdev_close_device, 2287 .ioctl = vfio_ap_mdev_ioctl, 2288 .dma_unmap = vfio_ap_mdev_dma_unmap, 2289 .bind_iommufd = vfio_iommufd_emulated_bind, 2290 .unbind_iommufd = vfio_iommufd_emulated_unbind, 2291 .attach_ioas = vfio_iommufd_emulated_attach_ioas, 2292 .detach_ioas = vfio_iommufd_emulated_detach_ioas, 2293 .request = vfio_ap_mdev_request 2294 }; 2295 2296 static struct mdev_driver vfio_ap_matrix_driver = { 2297 .device_api = VFIO_DEVICE_API_AP_STRING, 2298 .max_instances = MAX_ZDEV_ENTRIES_EXT, 2299 .driver = { 2300 .name = "vfio_ap_mdev", 2301 .owner = THIS_MODULE, 2302 .mod_name = KBUILD_MODNAME, 2303 .dev_groups = vfio_ap_mdev_attr_groups, 2304 }, 2305 .probe = vfio_ap_mdev_probe, 2306 .remove = vfio_ap_mdev_remove, 2307 }; 2308 2309 int vfio_ap_mdev_register(void) 2310 { 2311 int ret; 2312 2313 ret = mdev_register_driver(&vfio_ap_matrix_driver); 2314 if (ret) 2315 return ret; 2316 2317 matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT; 2318 matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT; 2319 matrix_dev->mdev_types[0] = &matrix_dev->mdev_type; 2320 ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device, 2321 &vfio_ap_matrix_driver, 2322 matrix_dev->mdev_types, 1); 2323 if (ret) 2324 goto err_driver; 2325 return 0; 2326 2327 err_driver: 2328 mdev_unregister_driver(&vfio_ap_matrix_driver); 2329 return ret; 2330 } 2331 2332 void vfio_ap_mdev_unregister(void) 2333 { 2334 
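/* Tear down in the reverse order of registration in vfio_ap_mdev_register() */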
mdev_unregister_parent(&matrix_dev->parent); 2335 mdev_unregister_driver(&vfio_ap_matrix_driver); 2336 } 2337 2338 int vfio_ap_mdev_probe_queue(struct ap_device *apdev) 2339 { 2340 int ret; 2341 struct vfio_ap_queue *q; 2342 DECLARE_BITMAP(apm_filtered, AP_DEVICES); 2343 struct ap_matrix_mdev *matrix_mdev; 2344 2345 ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group); 2346 if (ret) 2347 return ret; 2348 2349 q = kzalloc(sizeof(*q), GFP_KERNEL); 2350 if (!q) { 2351 ret = -ENOMEM; 2352 goto err_remove_group; 2353 } 2354 2355 q->apqn = to_ap_queue(&apdev->device)->qid; 2356 q->saved_isc = VFIO_AP_ISC_INVALID; 2357 memset(&q->reset_status, 0, sizeof(q->reset_status)); 2358 INIT_WORK(&q->reset_work, apq_reset_check); 2359 matrix_mdev = get_update_locks_by_apqn(q->apqn); 2360 2361 if (matrix_mdev) { 2362 vfio_ap_mdev_link_queue(matrix_mdev, q); 2363 2364 /* 2365 * If we're in the process of handling the adding of adapters or 2366 * domains to the host's AP configuration, then let the 2367 * vfio_ap device driver's on_scan_complete callback filter the 2368 * matrix and update the guest's AP configuration after all of 2369 * the new queue devices are probed. 2370 */ 2371 if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) || 2372 !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS)) 2373 goto done; 2374 2375 if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) { 2376 vfio_ap_mdev_update_guest_apcb(matrix_mdev); 2377 reset_queues_for_apids(matrix_mdev, apm_filtered); 2378 } 2379 } 2380 2381 done: 2382 dev_set_drvdata(&apdev->device, q); 2383 release_update_locks_for_mdev(matrix_mdev); 2384 2385 return ret; 2386 2387 err_remove_group: 2388 sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group); 2389 return ret; 2390 } 2391 2392 void vfio_ap_mdev_remove_queue(struct ap_device *apdev) 2393 { 2394 unsigned long apid, apqi; 2395 struct vfio_ap_queue *q; 2396 struct ap_matrix_mdev *matrix_mdev; 2397 2398 sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group); 2399 q = dev_get_drvdata(&apdev->device); 2400 get_update_locks_for_queue(q); 2401 matrix_mdev = q->matrix_mdev; 2402 apid = AP_QID_CARD(q->apqn); 2403 apqi = AP_QID_QUEUE(q->apqn); 2404 2405 if (matrix_mdev) { 2406 /* If the queue is assigned to the guest's AP configuration */ 2407 if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && 2408 test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) { 2409 /* 2410 * Since the queues are defined via a matrix of adapters 2411 * and domains, it is not possible to hot unplug a 2412 * single queue; so, let's unplug the adapter. 2413 */ 2414 clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); 2415 vfio_ap_mdev_update_guest_apcb(matrix_mdev); 2416 reset_queues_for_apid(matrix_mdev, apid); 2417 goto done; 2418 } 2419 } 2420 2421 /* 2422 * If the queue is not in the host's AP configuration, then resetting 2423 * it will fail with response code 01 (APQN not valid); so, let's make 2424 * sure it is in the host's config.
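* The host's config here is the copy cached in matrix_dev->info by
* vfio_ap_on_cfg_changed().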
2425 */ 2426 if (test_bit_inv(apid, (unsigned long *)matrix_dev->info.apm) && 2427 test_bit_inv(apqi, (unsigned long *)matrix_dev->info.aqm)) { 2428 vfio_ap_mdev_reset_queue(q); 2429 flush_work(&q->reset_work); 2430 } 2431 2432 done: 2433 if (matrix_mdev) 2434 vfio_ap_unlink_queue_fr_mdev(q); 2435 2436 dev_set_drvdata(&apdev->device, NULL); 2437 kfree(q); 2438 release_update_locks_for_mdev(matrix_mdev); 2439 } 2440 2441 /** 2442 * vfio_ap_mdev_resource_in_use: check whether any of a set of APQNs is 2443 * assigned to a mediated device under the control 2444 * of the vfio_ap device driver. 2445 * 2446 * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check. 2447 * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check. 2448 * 2449 * Return: 2450 * * -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are 2451 * assigned to a mediated device under the control of the vfio_ap 2452 * device driver. 2453 * * Otherwise, return 0. 2454 */ 2455 int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm) 2456 { 2457 int ret; 2458 2459 mutex_lock(&matrix_dev->guests_lock); 2460 mutex_lock(&matrix_dev->mdevs_lock); 2461 ret = vfio_ap_mdev_verify_no_sharing(apm, aqm); 2462 mutex_unlock(&matrix_dev->mdevs_lock); 2463 mutex_unlock(&matrix_dev->guests_lock); 2464 2465 return ret; 2466 } 2467 2468 /** 2469 * vfio_ap_mdev_hot_unplug_cfg - hot unplug the adapters, domains and control 2470 * domains that have been removed from the host's 2471 * AP configuration from a guest. 2472 * 2473 * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest. 2474 * @aprem: the adapters that have been removed from the host's AP configuration 2475 * @aqrem: the domains that have been removed from the host's AP configuration 2476 * @cdrem: the control domains that have been removed from the host's AP 2477 * configuration. 2478 */ 2479 static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev, 2480 unsigned long *aprem, 2481 unsigned long *aqrem, 2482 unsigned long *cdrem) 2483 { 2484 int do_hotplug = 0; 2485 2486 if (!bitmap_empty(aprem, AP_DEVICES)) { 2487 do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm, 2488 matrix_mdev->shadow_apcb.apm, 2489 aprem, AP_DEVICES); 2490 } 2491 2492 if (!bitmap_empty(aqrem, AP_DOMAINS)) { 2493 do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm, 2494 matrix_mdev->shadow_apcb.aqm, 2495 aqrem, AP_DOMAINS); 2496 } 2497 2498 if (!bitmap_empty(cdrem, AP_DOMAINS)) 2499 do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm, 2500 matrix_mdev->shadow_apcb.adm, 2501 cdrem, AP_DOMAINS); 2502 2503 if (do_hotplug) 2504 vfio_ap_mdev_update_guest_apcb(matrix_mdev); 2505 } 2506 2507 /** 2508 * vfio_ap_mdev_cfg_remove - determines which guests are using the adapters, 2509 * domains and control domains that have been removed 2510 * from the host AP configuration and unplugs them 2511 * from those guests. 2512 * 2513 * @ap_remove: bitmap specifying which adapters have been removed from the host 2514 * config. 2515 * @aq_remove: bitmap specifying which domains have been removed from the host 2516 * config. 2517 * @cd_remove: bitmap specifying which control domains have been removed from 2518 * the host config.
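* Note: the guest's kvm->lock and the matrix_dev->mdevs_lock are taken for
* each matrix_mdev while its shadow APCB is updated.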
2519 */ 2520 static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove, 2521 unsigned long *aq_remove, 2522 unsigned long *cd_remove) 2523 { 2524 struct ap_matrix_mdev *matrix_mdev; 2525 DECLARE_BITMAP(aprem, AP_DEVICES); 2526 DECLARE_BITMAP(aqrem, AP_DOMAINS); 2527 DECLARE_BITMAP(cdrem, AP_DOMAINS); 2528 int do_remove = 0; 2529 2530 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { 2531 mutex_lock(&matrix_mdev->kvm->lock); 2532 mutex_lock(&matrix_dev->mdevs_lock); 2533 2534 do_remove |= bitmap_and(aprem, ap_remove, 2535 matrix_mdev->matrix.apm, 2536 AP_DEVICES); 2537 do_remove |= bitmap_and(aqrem, aq_remove, 2538 matrix_mdev->matrix.aqm, 2539 AP_DOMAINS); 2540 do_remove |= bitmap_and(cdrem, cd_remove, 2541 matrix_mdev->matrix.adm, 2542 AP_DOMAINS); 2543 2544 if (do_remove) 2545 vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem, 2546 cdrem); 2547 2548 mutex_unlock(&matrix_dev->mdevs_lock); 2549 mutex_unlock(&matrix_mdev->kvm->lock); 2550 } 2551 } 2552 2553 /** 2554 * vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains and 2555 * control domains from the host AP configuration 2556 * by unplugging them from the guests that are 2557 * using them. 2558 * @cur_config_info: the current host AP configuration information 2559 * @prev_config_info: the previous host AP configuration information 2560 */ 2561 static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info, 2562 struct ap_config_info *prev_config_info) 2563 { 2564 int do_remove; 2565 DECLARE_BITMAP(aprem, AP_DEVICES); 2566 DECLARE_BITMAP(aqrem, AP_DOMAINS); 2567 DECLARE_BITMAP(cdrem, AP_DOMAINS); 2568 2569 do_remove = bitmap_andnot(aprem, 2570 (unsigned long *)prev_config_info->apm, 2571 (unsigned long *)cur_config_info->apm, 2572 AP_DEVICES); 2573 do_remove |= bitmap_andnot(aqrem, 2574 (unsigned long *)prev_config_info->aqm, 2575 (unsigned long *)cur_config_info->aqm, 2576 AP_DOMAINS); 2577 do_remove |= bitmap_andnot(cdrem, 2578 (unsigned long *)prev_config_info->adm, 2579 (unsigned long *)cur_config_info->adm, 2580 AP_DOMAINS); 2581 2582 if (do_remove) 2583 vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem); 2584 } 2585 2586 /** 2587 * vfio_ap_filter_apid_by_qtype: filter APIDs from an AP mask for adapters that 2588 * are older than AP type 10 (CEX4). 2589 * @apm: a bitmap of the APIDs to examine 2590 * @aqm: a bitmap of the APQIs of the queues to query for the AP type. 2591 */ 2592 static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm) 2593 { 2594 bool apid_cleared; 2595 struct ap_queue_status status; 2596 unsigned long apid, apqi; 2597 struct ap_tapq_hwinfo info; 2598 2599 for_each_set_bit_inv(apid, apm, AP_DEVICES) { 2600 apid_cleared = false; 2601 2602 for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) { 2603 status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info); 2604 switch (status.response_code) { 2605 /* 2606 * According to the architecture in each case 2607 * below, the queue's info should be filled. 2608 */ 2609 case AP_RESPONSE_NORMAL: 2610 case AP_RESPONSE_RESET_IN_PROGRESS: 2611 case AP_RESPONSE_DECONFIGURED: 2612 case AP_RESPONSE_CHECKSTOPPED: 2613 case AP_RESPONSE_BUSY: 2614 /* 2615 * The vfio_ap device driver only 2616 * supports CEX4 and newer adapters, so 2617 * remove the APID if the adapter is 2618 * older than a CEX4.
2619 */ 2620 if (info.at < AP_DEVICE_TYPE_CEX4) { 2621 clear_bit_inv(apid, apm); 2622 apid_cleared = true; 2623 } 2624 2625 break; 2626 2627 default: 2628 /* 2629 * If we don't know the adapter type, 2630 * clear its APID since it can't be 2631 * determined whether the vfio_ap 2632 * device driver supports it. 2633 */ 2634 clear_bit_inv(apid, apm); 2635 apid_cleared = true; 2636 break; 2637 } 2638 2639 /* 2640 * If we've already cleared the APID from the apm, there 2641 * is no need to continue examining the remaining AP 2642 * queues to determine the type of the adapter. 2643 */ 2644 if (apid_cleared) 2645 break; 2646 } 2647 } 2648 } 2649 2650 /** 2651 * vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and 2652 * control domains that have been added to the host's 2653 * AP configuration for each matrix mdev to which they 2654 * are assigned. 2655 * 2656 * @apm_add: a bitmap specifying the adapters that have been added to the AP 2657 * configuration. 2658 * @aqm_add: a bitmap specifying the domains that have been added to the AP 2659 * configuration. 2660 * @adm_add: a bitmap specifying the control domains that have been added to the 2661 * AP configuration. 2662 */ 2663 static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add, 2664 unsigned long *adm_add) 2665 { 2666 struct ap_matrix_mdev *matrix_mdev; 2667 2668 if (list_empty(&matrix_dev->mdev_list)) 2669 return; 2670 2671 vfio_ap_filter_apid_by_qtype(apm_add, aqm_add); 2672 2673 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { 2674 bitmap_and(matrix_mdev->apm_add, 2675 matrix_mdev->matrix.apm, apm_add, AP_DEVICES); 2676 bitmap_and(matrix_mdev->aqm_add, 2677 matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS); 2678 bitmap_and(matrix_mdev->adm_add, 2679 matrix_mdev->matrix.adm, adm_add, AP_DOMAINS); 2680 } 2681 } 2682 2683 /** 2684 * vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and 2685 * control domains to the host AP configuration 2686 * by updating the bitmaps that specify what adapters, 2687 * domains and control domains have been added so they 2688 * can be hot plugged into the guest when the AP bus 2689 * scan completes (see vfio_ap_on_scan_complete 2690 * function). 2691 * @cur_config_info: the current AP configuration information 2692 * @prev_config_info: the previous AP configuration information 2693 */ 2694 static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info, 2695 struct ap_config_info *prev_config_info) 2696 { 2697 bool do_add; 2698 DECLARE_BITMAP(apm_add, AP_DEVICES); 2699 DECLARE_BITMAP(aqm_add, AP_DOMAINS); 2700 DECLARE_BITMAP(adm_add, AP_DOMAINS); 2701 2702 do_add = bitmap_andnot(apm_add, 2703 (unsigned long *)cur_config_info->apm, 2704 (unsigned long *)prev_config_info->apm, 2705 AP_DEVICES); 2706 do_add |= bitmap_andnot(aqm_add, 2707 (unsigned long *)cur_config_info->aqm, 2708 (unsigned long *)prev_config_info->aqm, 2709 AP_DOMAINS); 2710 do_add |= bitmap_andnot(adm_add, 2711 (unsigned long *)cur_config_info->adm, 2712 (unsigned long *)prev_config_info->adm, 2713 AP_DOMAINS); 2714 2715 if (do_add) 2716 vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add); 2717 } 2718 2719 /** 2720 * vfio_ap_on_cfg_changed - handles notification of changes to the host AP 2721 * configuration.
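* Removals are processed before additions: resources removed from the host
* configuration are hot unplugged from the affected guests right away, while
* added resources are only recorded here and hot plugged after the AP bus
* scan completes (see vfio_ap_on_scan_complete()).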
2722 * 2723 * @cur_cfg_info: the current host AP configuration 2724 * @prev_cfg_info: the previous host AP configuration 2725 */ 2726 void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info, 2727 struct ap_config_info *prev_cfg_info) 2728 { 2729 if (!cur_cfg_info || !prev_cfg_info) 2730 return; 2731 2732 mutex_lock(&matrix_dev->guests_lock); 2733 2734 vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info); 2735 vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info); 2736 memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info)); 2737 2738 mutex_unlock(&matrix_dev->guests_lock); 2739 } 2740 2741 static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev) 2742 { 2743 DECLARE_BITMAP(apm_filtered, AP_DEVICES); 2744 bool filter_domains, filter_adapters, filter_cdoms, do_hotplug = false; 2745 2746 mutex_lock(&matrix_mdev->kvm->lock); 2747 mutex_lock(&matrix_dev->mdevs_lock); 2748 2749 filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm, 2750 matrix_mdev->apm_add, AP_DEVICES); 2751 filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm, 2752 matrix_mdev->aqm_add, AP_DOMAINS); 2753 filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm, 2754 matrix_mdev->adm_add, AP_DOMAINS); 2755 2756 if (filter_adapters || filter_domains) 2757 do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered); 2758 2759 if (filter_cdoms) 2760 do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev); 2761 2762 if (do_hotplug) 2763 vfio_ap_mdev_update_guest_apcb(matrix_mdev); 2764 2765 reset_queues_for_apids(matrix_mdev, apm_filtered); 2766 2767 mutex_unlock(&matrix_dev->mdevs_lock); 2768 mutex_unlock(&matrix_mdev->kvm->lock); 2769 } 2770 2771 void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info, 2772 struct ap_config_info *old_config_info) 2773 { 2774 struct ap_matrix_mdev *matrix_mdev; 2775 2776 mutex_lock(&matrix_dev->guests_lock); 2777 2778 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { 2779 if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) && 2780 bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) && 2781 bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS)) 2782 continue; 2783 2784 vfio_ap_mdev_hot_plug_cfg(matrix_mdev); 2785 bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES); 2786 bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS); 2787 bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS); 2788 } 2789 2790 mutex_unlock(&matrix_dev->guests_lock); 2791 } 2792
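/*
 * Illustrative usage sketch (not part of the driver): the ap_config
 * attribute defined above takes the APM, AQM and ADM as three hex masks,
 * comma-separated and newline-terminated, in the form emitted by
 * write_ap_bitmap(): "0x" followed by 64 hex digits per mask, with bit 0
 * as the leftmost (most significant) bit. Assuming a mediated device
 * created under a hypothetical $UUID, adapter 0x0a plus usage and control
 * domain 0x0004 could be assigned in one atomic update along the lines of:
 *
 *   echo 0x0020...0,0x0800...0,0x0800...0 > \
 *       /sys/devices/vfio_ap/matrix/$UUID/ap_config
 *
 * (masks elided to "...0" for readability: the leading "0020" sets APM bit
 * 10, i.e. adapter 0x0a, and the leading "08" sets bit 4 of the AQM/ADM,
 * i.e. domain 0x0004, counting bits from the leftmost hex digit).
 */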