region.c (b9686e8c8e39d4072081ef078c04915ee51c8af4) region.c (384e624bb211b406db40edc900bb51af8bb267d0)
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
3#include <linux/memregion.h>
4#include <linux/genalloc.h>
5#include <linux/device.h>
6#include <linux/module.h>
7#include <linux/slab.h>
8#include <linux/uuid.h>

--- 425 unchanged lines hidden (view full) ---

434 else
435 rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
436out:
437 up_read(&cxl_region_rwsem);
438
439 return rc;
440}
441
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
3#include <linux/memregion.h>
4#include <linux/genalloc.h>
5#include <linux/device.h>
6#include <linux/module.h>
7#include <linux/slab.h>
8#include <linux/uuid.h>

--- 425 unchanged lines hidden (view full) ---

434 else
435 rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
436out:
437 up_read(&cxl_region_rwsem);
438
439 return rc;
440}
441
/*
 * - Check that the given endpoint is attached to a host-bridge identified
 *   in the root interleave.
 */
442static int match_free_decoder(struct device *dev, void *data)
443{
444 struct cxl_decoder *cxld;
445 int *id = data;
446
447 if (!is_switch_decoder(dev))
448 return 0;
449
450 cxld = to_cxl_decoder(dev);
451
452 /* enforce ordered allocation */
453 if (cxld->id != *id)
454 return 0;
455
456 if (!cxld->region)
457 return 1;
458
459 (*id)++;
460
461 return 0;
462}
463
464static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
465 struct cxl_region *cxlr)
466{
467 struct device *dev;
468 int id = 0;
469
470 dev = device_find_child(&port->dev, &id, match_free_decoder);
471 if (!dev)
472 return NULL;
473 /*
474 * This decoder is pinned registered as long as the endpoint decoder is
475 * registered, and endpoint decoder unregistration holds the
476 * cxl_region_rwsem over unregister events, so no need to hold on to
477 * this extra reference.
478 */
479 put_device(dev);
480 return to_cxl_decoder(dev);
481}
482
483static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
484 struct cxl_region *cxlr)
485{
486 struct cxl_region_ref *cxl_rr;
487 int rc;
488
489 cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
490 if (!cxl_rr)
491 return NULL;
492 cxl_rr->port = port;
493 cxl_rr->region = cxlr;
494 xa_init(&cxl_rr->endpoints);
495
496 rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
497 if (rc) {
498 dev_dbg(&cxlr->dev,
499 "%s: failed to track region reference: %d\n",
500 dev_name(&port->dev), rc);
501 kfree(cxl_rr);
502 return NULL;
503 }
504
505 return cxl_rr;
506}
507
508static void free_region_ref(struct cxl_region_ref *cxl_rr)
509{
510 struct cxl_port *port = cxl_rr->port;
511 struct cxl_region *cxlr = cxl_rr->region;
512 struct cxl_decoder *cxld = cxl_rr->decoder;
513
514 dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
515 if (cxld->region == cxlr) {
516 cxld->region = NULL;
517 put_device(&cxlr->dev);
518 }
519
520 xa_erase(&port->regions, (unsigned long)cxlr);
521 xa_destroy(&cxl_rr->endpoints);
522 kfree(cxl_rr);
523}
524
525static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
526 struct cxl_endpoint_decoder *cxled)
527{
528 int rc;
529 struct cxl_port *port = cxl_rr->port;
530 struct cxl_region *cxlr = cxl_rr->region;
531 struct cxl_decoder *cxld = cxl_rr->decoder;
532 struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));
533
534 rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
535 GFP_KERNEL);
536 if (rc)
537 return rc;
538 cxl_rr->nr_eps++;
539
540 if (!cxld->region) {
541 cxld->region = cxlr;
542 get_device(&cxlr->dev);
543 }
544
545 return 0;
546}
547
548/**
549 * cxl_port_attach_region() - track a region's interest in a port by endpoint
550 * @port: port to add a new region reference 'struct cxl_region_ref'
551 * @cxlr: region to attach to @port
552 * @cxled: endpoint decoder used to create or further pin a region reference
553 * @pos: interleave position of @cxled in @cxlr
554 *
555 * The attach event is an opportunity to validate CXL decode setup
556 * constraints and record metadata needed for programming HDM decoders,
557 * in particular decoder target lists.
558 *
559 * The steps are:
560 * - validate that there are no other regions with a higher HPA already
561 * associated with @port
562 * - establish a region reference if one is not already present
563 * - additionally allocate a decoder instance that will host @cxlr on
564 * @port
565 * - pin the region reference by the endpoint
566 * - account for how many entries in @port's target list are needed to
567 * cover all of the added endpoints.
 */
569static int cxl_port_attach_region(struct cxl_port *port,
570 struct cxl_region *cxlr,
571 struct cxl_endpoint_decoder *cxled, int pos)
572{
573 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
574 struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
575 struct cxl_region_ref *cxl_rr = NULL, *iter;
576 struct cxl_region_params *p = &cxlr->params;
577 struct cxl_decoder *cxld = NULL;
578 unsigned long index;
579 int rc = -EBUSY;
580
581 lockdep_assert_held_write(&cxl_region_rwsem);
582
583 xa_for_each(&port->regions, index, iter) {
584 struct cxl_region_params *ip = &iter->region->params;
585
586 if (iter->region == cxlr)
587 cxl_rr = iter;
588 if (ip->res->start > p->res->start) {
589 dev_dbg(&cxlr->dev,
590 "%s: HPA order violation %s:%pr vs %pr\n",
591 dev_name(&port->dev),
592 dev_name(&iter->region->dev), ip->res, p->res);
593 return -EBUSY;
594 }
595 }
596
597 if (cxl_rr) {
598 struct cxl_ep *ep_iter;
599 int found = 0;
600
601 cxld = cxl_rr->decoder;
602 xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
603 if (ep_iter == ep)
604 continue;
605 if (ep_iter->next == ep->next) {
606 found++;
607 break;
608 }
609 }
610
611 /*
612 * If this is a new target or if this port is direct connected
613 * to this endpoint then add to the target count.
614 */
615 if (!found || !ep->next)
616 cxl_rr->nr_targets++;
617 } else {
618 cxl_rr = alloc_region_ref(port, cxlr);
619 if (!cxl_rr) {
620 dev_dbg(&cxlr->dev,
621 "%s: failed to allocate region reference\n",
622 dev_name(&port->dev));
623 return -ENOMEM;
624 }
625 }
626
627 if (!cxld) {
628 if (port == cxled_to_port(cxled))
629 cxld = &cxled->cxld;
630 else
631 cxld = cxl_region_find_decoder(port, cxlr);
632 if (!cxld) {
633 dev_dbg(&cxlr->dev, "%s: no decoder available\n",
634 dev_name(&port->dev));
635 goto out_erase;
636 }
637
638 if (cxld->region) {
639 dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
640 dev_name(&port->dev), dev_name(&cxld->dev),
641 dev_name(&cxld->region->dev));
642 rc = -EBUSY;
643 goto out_erase;
644 }
645
646 cxl_rr->decoder = cxld;
647 }
648
649 rc = cxl_rr_ep_add(cxl_rr, cxled);
650 if (rc) {
651 dev_dbg(&cxlr->dev,
652 "%s: failed to track endpoint %s:%s reference\n",
653 dev_name(&port->dev), dev_name(&cxlmd->dev),
654 dev_name(&cxld->dev));
655 goto out_erase;
656 }
657
658 return 0;
659out_erase:
660 if (cxl_rr->nr_eps == 0)
661 free_region_ref(cxl_rr);
662 return rc;
663}
664
665static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
666 struct cxl_region *cxlr)
667{
668 return xa_load(&port->regions, (unsigned long)cxlr);
669}
670
671static void cxl_port_detach_region(struct cxl_port *port,
672 struct cxl_region *cxlr,
673 struct cxl_endpoint_decoder *cxled)
674{
675 struct cxl_region_ref *cxl_rr;
676 struct cxl_ep *ep;
677
678 lockdep_assert_held_write(&cxl_region_rwsem);
679
680 cxl_rr = cxl_rr_load(port, cxlr);
681 if (!cxl_rr)
682 return;
683
684 ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
685 if (ep) {
686 struct cxl_ep *ep_iter;
687 unsigned long index;
688 int found = 0;
689
690 cxl_rr->nr_eps--;
691 xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
692 if (ep_iter->next == ep->next) {
693 found++;
694 break;
695 }
696 }
697 if (!found)
698 cxl_rr->nr_targets--;
699 }
700
701 if (cxl_rr->nr_eps == 0)
702 free_region_ref(cxl_rr);
703}
704
446static int cxl_region_attach(struct cxl_region *cxlr,
447 struct cxl_endpoint_decoder *cxled, int pos)
448{
705static int cxl_region_attach(struct cxl_region *cxlr,
706 struct cxl_endpoint_decoder *cxled, int pos)
707{
708 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
709 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
710 struct cxl_port *ep_port, *root_port, *iter;
449 struct cxl_region_params *p = &cxlr->params;
711 struct cxl_region_params *p = &cxlr->params;
712 struct cxl_dport *dport;
713 int i, rc = -ENXIO;
450
451 if (cxled->mode == CXL_DECODER_DEAD) {
452 dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
453 return -ENODEV;
454 }
455
714
715 if (cxled->mode == CXL_DECODER_DEAD) {
716 dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
717 return -ENODEV;
718 }
719
456 if (pos >= p->interleave_ways) {
720 /* all full of members, or interleave config not established? */
721 if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
722 dev_dbg(&cxlr->dev, "region already active\n");
723 return -EBUSY;
724 } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
725 dev_dbg(&cxlr->dev, "interleave config missing\n");
726 return -ENXIO;
727 }
728
729 if (pos < 0 || pos >= p->interleave_ways) {
457 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
458 p->interleave_ways);
459 return -ENXIO;
460 }
461
462 if (p->targets[pos] == cxled)
463 return 0;
464
465 if (p->targets[pos]) {
466 struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
467 struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);
468
469 dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
470 pos, dev_name(&cxlmd_target->dev),
471 dev_name(&cxled_target->cxld.dev));
472 return -EBUSY;
473 }
474
730 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
731 p->interleave_ways);
732 return -ENXIO;
733 }
734
735 if (p->targets[pos] == cxled)
736 return 0;
737
738 if (p->targets[pos]) {
739 struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
740 struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);
741
742 dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
743 pos, dev_name(&cxlmd_target->dev),
744 dev_name(&cxled_target->cxld.dev));
745 return -EBUSY;
746 }
747
748 for (i = 0; i < p->interleave_ways; i++) {
749 struct cxl_endpoint_decoder *cxled_target;
750 struct cxl_memdev *cxlmd_target;
751
752 cxled_target = p->targets[pos];
753 if (!cxled_target)
754 continue;
755
756 cxlmd_target = cxled_to_memdev(cxled_target);
757 if (cxlmd_target == cxlmd) {
758 dev_dbg(&cxlr->dev,
759 "%s already specified at position %d via: %s\n",
760 dev_name(&cxlmd->dev), pos,
761 dev_name(&cxled_target->cxld.dev));
762 return -EBUSY;
763 }
764 }
765
766 ep_port = cxled_to_port(cxled);
767 root_port = cxlrd_to_port(cxlrd);
768 dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
769 if (!dport) {
770 dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
771 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
772 dev_name(cxlr->dev.parent));
773 return -ENXIO;
774 }
775
776 if (cxlrd->calc_hb(cxlrd, pos) != dport) {
777 dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
778 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
779 dev_name(&cxlrd->cxlsd.cxld.dev));
780 return -ENXIO;
781 }
782
783 if (cxled->cxld.target_type != cxlr->type) {
784 dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
785 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
786 cxled->cxld.target_type, cxlr->type);
787 return -ENXIO;
788 }
789
790 if (!cxled->dpa_res) {
791 dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
792 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
793 return -ENXIO;
794 }
795
796 if (resource_size(cxled->dpa_res) * p->interleave_ways !=
797 resource_size(p->res)) {
798 dev_dbg(&cxlr->dev,
799 "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
800 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
801 (u64)resource_size(cxled->dpa_res), p->interleave_ways,
802 (u64)resource_size(p->res));
803 return -EINVAL;
804 }
805
806 for (iter = ep_port; !is_cxl_root(iter);
807 iter = to_cxl_port(iter->dev.parent)) {
808 rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
809 if (rc)
810 goto err;
811 }
812
475 p->targets[pos] = cxled;
476 cxled->pos = pos;
477 p->nr_targets++;
478
813 p->targets[pos] = cxled;
814 cxled->pos = pos;
815 p->nr_targets++;
816
817 if (p->nr_targets == p->interleave_ways)
818 p->state = CXL_CONFIG_ACTIVE;
819
479 return 0;
820 return 0;
821
822err:
823 for (iter = ep_port; !is_cxl_root(iter);
824 iter = to_cxl_port(iter->dev.parent))
825 cxl_port_detach_region(iter, cxlr, cxled);
826 return rc;
480}
481
482static void cxl_region_detach(struct cxl_endpoint_decoder *cxled)
483{
827}
828
829static void cxl_region_detach(struct cxl_endpoint_decoder *cxled)
830{
831 struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
484 struct cxl_region *cxlr = cxled->cxld.region;
485 struct cxl_region_params *p;
486
487 lockdep_assert_held_write(&cxl_region_rwsem);
488
489 if (!cxlr)
490 return;
491
492 p = &cxlr->params;
493 get_device(&cxlr->dev);
494
832 struct cxl_region *cxlr = cxled->cxld.region;
833 struct cxl_region_params *p;
834
835 lockdep_assert_held_write(&cxl_region_rwsem);
836
837 if (!cxlr)
838 return;
839
840 p = &cxlr->params;
841 get_device(&cxlr->dev);
842
843 for (iter = ep_port; !is_cxl_root(iter);
844 iter = to_cxl_port(iter->dev.parent))
845 cxl_port_detach_region(iter, cxlr, cxled);
846
495 if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
496 p->targets[cxled->pos] != cxled) {
497 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
498
499 dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
500 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
501 cxled->pos);
502 goto out;
503 }
504
847 if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
848 p->targets[cxled->pos] != cxled) {
849 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
850
851 dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
852 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
853 cxled->pos);
854 goto out;
855 }
856
857 if (p->state == CXL_CONFIG_ACTIVE)
858 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
505 p->targets[cxled->pos] = NULL;
506 p->nr_targets--;
507
859 p->targets[cxled->pos] = NULL;
860 p->nr_targets--;
861
508 /* notify the region driver that one of its targets has deparated */
862 /* notify the region driver that one of its targets has departed */
509 up_write(&cxl_region_rwsem);
510 device_release_driver(&cxlr->dev);
511 down_write(&cxl_region_rwsem);
512out:
513 put_device(&cxlr->dev);
514}
515
516void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)

--- 365 unchanged lines hidden ---
863 up_write(&cxl_region_rwsem);
864 device_release_driver(&cxlr->dev);
865 down_write(&cxl_region_rwsem);
866out:
867 put_device(&cxlr->dev);
868}
869
870void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)

--- 365 unchanged lines hidden ---