Lines Matching +full:gic +full:- +full:v5 +full:- +full:its
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved.
6 #define pr_fmt(fmt) "GICv5 ITS: " fmt
19 #include <linux/irqchip/arm-gic-v5.h>
20 #include <linux/irqchip/irq-msi-lib.h>
22 #include "irq-gic-its-msi-parent.h"
47 return readl_relaxed(its_node->its_base + reg_offset); in its_readl_relaxed()
53 writel_relaxed(val, its_node->its_base + reg_offset); in its_writel_relaxed()
59 writeq_relaxed(val, its_node->its_base + reg_offset); in its_writeq_relaxed()
62 static void gicv5_its_dcache_clean(struct gicv5_its_chip_data *its, void *start, in gicv5_its_dcache_clean()
67 if (its->flags & ITS_FLAGS_NON_COHERENT) in gicv5_its_dcache_clean()
73 static void its_write_table_entry(struct gicv5_its_chip_data *its, __le64 *entry, in its_write_table_entry()
77 gicv5_its_dcache_clean(its, entry, sizeof(*entry)); in its_write_table_entry()
80 #define devtab_cfgr_field(its, f) \
81 FIELD_GET(GICV5_ITS_DT_CFGR_##f, (its)->devtab_cfgr.cfgr)
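The macro above pulls a named field out of the ITS's cached device-table configuration word with FIELD_GET(); token pasting turns devtab_cfgr_field(its, L2SZ) into FIELD_GET(GICV5_ITS_DT_CFGR_L2SZ, ...). A minimal sketch of the same pattern, using a made-up register layout (the DEMO_* masks are illustrative, not the GICv5 encoding):

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define DEMO_CFGR_STRUCTURE      BIT(16)          /* illustrative: table structure bit */
    #define DEMO_CFGR_L2SZ           GENMASK(7, 6)
    #define DEMO_CFGR_DEVICEID_BITS  GENMASK(5, 0)

    #define demo_cfgr_field(cfgr, f) FIELD_GET(DEMO_CFGR_##f, cfgr)

    static unsigned int demo_deviceid_bits(u32 cfgr)
    {
            /* Extract the DEVICEID_BITS field from the cached configuration word */
            return demo_cfgr_field(cfgr, DEVICEID_BITS);
    }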
83 static int gicv5_its_cache_sync(struct gicv5_its_chip_data *its) in gicv5_its_cache_sync()
85 return gicv5_wait_for_op_atomic(its->its_base, GICV5_ITS_STATUSR, in gicv5_its_cache_sync()
89 static void gicv5_its_syncr(struct gicv5_its_chip_data *its, in gicv5_its_syncr()
95 FIELD_PREP(GICV5_ITS_SYNCR_DEVICEID, its_dev->device_id); in gicv5_its_syncr()
97 its_writeq_relaxed(its, syncr, GICV5_ITS_SYNCR); in gicv5_its_syncr()
99 gicv5_wait_for_op(its->its_base, GICV5_ITS_SYNC_STATUSR, GICV5_ITS_SYNC_STATUSR_IDLE); in gicv5_its_syncr()
120 static int gicv5_its_itt_cache_inv(struct gicv5_its_chip_data *its, u32 device_id, in gicv5_its_itt_cache_inv()
130 its_writeq_relaxed(its, didr, GICV5_ITS_DIDR); in gicv5_its_itt_cache_inv()
131 its_writel_relaxed(its, eidr, GICV5_ITS_EIDR); in gicv5_its_itt_cache_inv()
132 its_writel_relaxed(its, eventr, GICV5_ITS_INV_EVENTR); in gicv5_its_itt_cache_inv()
134 return gicv5_its_cache_sync(its); in gicv5_its_itt_cache_inv()
139 kfree(its_dev->itt_cfg.linear.itt); in gicv5_its_free_itt_linear()
144 unsigned int i, num_ents = its_dev->itt_cfg.l2.num_l1_ents; in gicv5_its_free_itt_two_level()
147 kfree(its_dev->itt_cfg.l2.l2ptrs[i]); in gicv5_its_free_itt_two_level()
149 kfree(its_dev->itt_cfg.l2.l2ptrs); in gicv5_its_free_itt_two_level()
150 kfree(its_dev->itt_cfg.l2.l1itt); in gicv5_its_free_itt_two_level()
155 if (!its_dev->itt_cfg.l2itt) in gicv5_its_free_itt()
161 static int gicv5_its_create_itt_linear(struct gicv5_its_chip_data *its, in gicv5_its_create_itt_linear()
170 return -ENOMEM; in gicv5_its_create_itt_linear()
172 its_dev->itt_cfg.linear.itt = itt; in gicv5_its_create_itt_linear()
173 its_dev->itt_cfg.linear.num_ents = num_ents; in gicv5_its_create_itt_linear()
174 its_dev->itt_cfg.l2itt = false; in gicv5_its_create_itt_linear()
175 its_dev->itt_cfg.event_id_bits = event_id_bits; in gicv5_its_create_itt_linear()
177 gicv5_its_dcache_clean(its, itt, num_ents * sizeof(*itt)); in gicv5_its_create_itt_linear()
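The clean above covers num_ents 8-byte ITT entries. As a rough illustration of the arithmetic, assuming the linear table is sized to cover the full 2^event_id_bits EventID space (the matched lines do not show how num_ents is actually derived):

    #include <linux/bits.h>
    #include <linux/types.h>

    /* Illustrative only: bytes needed for a linear ITT spanning event_id_bits */
    static size_t demo_linear_itt_bytes(u32 event_id_bits)
    {
            /* e.g. 5 EventID bits -> 32 entries -> 256 bytes */
            return BIT(event_id_bits) * sizeof(__le64);
    }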
183 * Allocate a two-level ITT. All ITT entries are allocated in one go, unlike
187 static int gicv5_its_create_itt_two_level(struct gicv5_its_chip_data *its, in gicv5_its_create_itt_two_level()
203 return -EINVAL; in gicv5_its_create_itt_two_level()
208 l1_bits = event_id_bits - l2_bits; in gicv5_its_create_itt_two_level()
214 return -ENOMEM; in gicv5_its_create_itt_two_level()
219 return -ENOMEM; in gicv5_its_create_itt_two_level()
222 its_dev->itt_cfg.l2.l2ptrs = l2ptrs; in gicv5_its_create_itt_two_level()
224 its_dev->itt_cfg.l2.l2sz = itt_l2sz; in gicv5_its_create_itt_two_level()
225 its_dev->itt_cfg.l2.l1itt = itt_l1; in gicv5_its_create_itt_two_level()
226 its_dev->itt_cfg.l2.num_l1_ents = num_ents; in gicv5_its_create_itt_two_level()
227 its_dev->itt_cfg.l2itt = true; in gicv5_its_create_itt_two_level()
228 its_dev->itt_cfg.event_id_bits = event_id_bits; in gicv5_its_create_itt_two_level()
231 * Need to determine how many entries there are per L2 - this is based in gicv5_its_create_itt_two_level()
245 ret = -ENOMEM; in gicv5_its_create_itt_two_level()
249 its_dev->itt_cfg.l2.l2ptrs[i] = itt_l2; in gicv5_its_create_itt_two_level()
253 gicv5_its_dcache_clean(its, itt_l2, l2sz); in gicv5_its_create_itt_two_level()
262 gicv5_its_dcache_clean(its, itt_l1, num_ents * sizeof(*itt_l1)); in gicv5_its_create_itt_two_level()
267 for (i = i - 1; i >= 0; i--) in gicv5_its_create_itt_two_level()
268 kfree(its_dev->itt_cfg.l2.l2ptrs[i]); in gicv5_its_create_itt_two_level()
270 kfree(its_dev->itt_cfg.l2.l2ptrs); in gicv5_its_create_itt_two_level()
277 * a two-level table and if so depending on the number of id_bits
278 * requested, determine whether a two-level table is required.
280 * Return the 2-level size value if a two level table is deemed
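These comment fragments describe the helper that decides between a linear and a two-level ITT and selects an L2 size. The exact policy (and the IDR-advertised limits it honours) is not visible in the matched lines; one plausible shape for such a check, reusing gicv5_its_l2sz_to_l2_bits() as it is used further down, would be:

    /*
     * Illustrative decision only: a single leaf table of the chosen L2 size
     * holds 2^l2_bits events, so go two-level once event_id_bits exceeds that.
     */
    static bool demo_itt_needs_two_level(u32 event_id_bits, u8 itt_l2sz)
    {
            u32 l2_bits = gicv5_its_l2sz_to_l2_bits(itt_l2sz);

            return event_id_bits > l2_bits;
    }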
347 if (!its_dev->itt_cfg.l2itt) { in gicv5_its_device_get_itte_ref()
348 __le64 *itt = its_dev->itt_cfg.linear.itt; in gicv5_its_device_get_itte_ref()
353 l2_bits = gicv5_its_l2sz_to_l2_bits(its_dev->itt_cfg.l2.l2sz); in gicv5_its_device_get_itte_ref()
355 l2_idx = event_id & GENMASK(l2_bits - 1, 0); in gicv5_its_device_get_itte_ref()
356 l2_itt = its_dev->itt_cfg.l2.l2ptrs[l1_idx]; in gicv5_its_device_get_itte_ref()
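The lookup above splits the EventID into an upper L1 index (selecting which L2 table) and a lower l2_bits-wide L2 index (the slot inside it). Pulled out on its own, with the l1_idx shift that is implied but not shown in the matched lines, the split looks roughly like this:

    #include <linux/bits.h>
    #include <linux/types.h>

    /* Illustrative: locate an ITT entry in a two-level table */
    static __le64 *demo_itte_ref(__le64 **l2ptrs, u32 event_id, u32 l2_bits)
    {
            u32 l1_idx = event_id >> l2_bits;                 /* which L2 table */
            u32 l2_idx = event_id & GENMASK(l2_bits - 1, 0);  /* slot within it */

            /* e.g. l2_bits = 9, event_id = 0x212 -> l1_idx = 1, l2_idx = 0x12 */
            return &l2ptrs[l1_idx][l2_idx];
    }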
361 static int gicv5_its_device_cache_inv(struct gicv5_its_chip_data *its, in gicv5_its_device_cache_inv()
367 didr = FIELD_PREP(GICV5_ITS_DIDR_DEVICEID, its_dev->device_id); in gicv5_its_device_cache_inv()
370 its_dev->itt_cfg.event_id_bits) | in gicv5_its_device_cache_inv()
372 its_writeq_relaxed(its, didr, GICV5_ITS_DIDR); in gicv5_its_device_cache_inv()
373 its_writel_relaxed(its, devicer, GICV5_ITS_INV_DEVICER); in gicv5_its_device_cache_inv()
375 return gicv5_its_cache_sync(its); in gicv5_its_device_cache_inv()
380 * Only used for 2-level device tables, and it is called on demand.
382 static int gicv5_its_alloc_l2_devtab(struct gicv5_its_chip_data *its, in gicv5_its_alloc_l2_devtab()
385 __le64 *l2devtab, *l1devtab = its->devtab_cfgr.l2.l1devtab; in gicv5_its_alloc_l2_devtab()
393 l2sz = devtab_cfgr_field(its, L2SZ); in gicv5_its_alloc_l2_devtab()
406 return -ENOMEM; in gicv5_its_alloc_l2_devtab()
408 its->devtab_cfgr.l2.l2ptrs[l1_index] = l2devtab; in gicv5_its_alloc_l2_devtab()
413 its_write_table_entry(its, &l1devtab[l1_index], l1dte); in gicv5_its_alloc_l2_devtab()
418 static __le64 *gicv5_its_devtab_get_dte_ref(struct gicv5_its_chip_data *its, in gicv5_its_devtab_get_dte_ref()
421 u8 str = devtab_cfgr_field(its, STRUCTURE); in gicv5_its_devtab_get_dte_ref()
427 l2devtab = its->devtab_cfgr.linear.devtab; in gicv5_its_devtab_get_dte_ref()
431 l2sz = devtab_cfgr_field(its, L2SZ); in gicv5_its_devtab_get_dte_ref()
434 l2_idx = device_id & GENMASK(l2_bits - 1, 0); in gicv5_its_devtab_get_dte_ref()
443 ret = gicv5_its_alloc_l2_devtab(its, l1_idx); in gicv5_its_devtab_get_dte_ref()
448 l2devtab = its->devtab_cfgr.l2.l2ptrs[l1_idx]; in gicv5_its_devtab_get_dte_ref()
457 static int gicv5_its_device_register(struct gicv5_its_chip_data *its, in gicv5_its_device_register()
468 device_id_bits = devtab_cfgr_field(its, DEVICEID_BITS); in gicv5_its_device_register()
470 if (its_dev->device_id >= BIT(device_id_bits)) { in gicv5_its_device_register()
472 its_dev->device_id, (u32)GENMASK(device_id_bits - 1, 0)); in gicv5_its_device_register()
473 return -EINVAL; in gicv5_its_device_register()
476 dte = gicv5_its_devtab_get_dte_ref(its, its_dev->device_id, true); in gicv5_its_device_register()
478 return -ENOMEM; in gicv5_its_device_register()
481 return -EBUSY; in gicv5_its_device_register()
485 * Based on these, determine if we should go for a 1- or 2-level ITT. in gicv5_its_device_register()
487 event_id_bits = order_base_2(its_dev->num_events); in gicv5_its_device_register()
489 idr2 = its_readl_relaxed(its, GICV5_ITS_IDR2); in gicv5_its_device_register()
495 return -EINVAL; in gicv5_its_device_register()
498 idr1 = its_readl_relaxed(its, GICV5_ITS_IDR1); in gicv5_its_device_register()
502 * whether a two-level or linear ITT is built, init it. in gicv5_its_device_register()
509 ret = gicv5_its_create_itt_two_level(its, its_dev, event_id_bits, in gicv5_its_device_register()
511 its_dev->num_events); in gicv5_its_device_register()
513 ret = gicv5_its_create_itt_linear(its, its_dev, event_id_bits); in gicv5_its_device_register()
517 itt_phys_base = two_level_itt ? virt_to_phys(its_dev->itt_cfg.l2.l1itt) : in gicv5_its_device_register()
518 virt_to_phys(its_dev->itt_cfg.linear.itt); in gicv5_its_device_register()
529 its_write_table_entry(its, dte, val); in gicv5_its_device_register()
531 ret = gicv5_its_device_cache_inv(its, its_dev); in gicv5_its_device_register()
533 its_write_table_entry(its, dte, 0); in gicv5_its_device_register()
545 static int gicv5_its_device_unregister(struct gicv5_its_chip_data *its, in gicv5_its_device_unregister()
550 dte = gicv5_its_devtab_get_dte_ref(its, its_dev->device_id, false); in gicv5_its_device_unregister()
554 its_dev->device_id); in gicv5_its_device_unregister()
555 return -EINVAL; in gicv5_its_device_unregister()
558 /* Zero everything - make it clear that this is an invalid entry */ in gicv5_its_device_unregister()
559 its_write_table_entry(its, dte, 0); in gicv5_its_device_unregister()
563 return gicv5_its_device_cache_inv(its, its_dev); in gicv5_its_device_unregister()
567 * Allocate a 1-level device table. All entries are allocated, but marked
570 static int gicv5_its_alloc_devtab_linear(struct gicv5_its_chip_data *its, in gicv5_its_alloc_devtab_linear()
580 * deviceID bits to support a 2-level device table. If that's not in gicv5_its_alloc_devtab_linear()
596 return -ENOMEM; in gicv5_its_alloc_devtab_linear()
598 gicv5_its_dcache_clean(its, devtab, sz); in gicv5_its_alloc_devtab_linear()
604 its_writel_relaxed(its, cfgr, GICV5_ITS_DT_CFGR); in gicv5_its_alloc_devtab_linear()
607 its_writeq_relaxed(its, baser, GICV5_ITS_DT_BASER); in gicv5_its_alloc_devtab_linear()
609 its->devtab_cfgr.cfgr = cfgr; in gicv5_its_alloc_devtab_linear()
610 its->devtab_cfgr.linear.devtab = devtab; in gicv5_its_alloc_devtab_linear()
616 * Allocate a 2-level device table. L2 entries are not allocated,
617 * they are allocated on-demand.
619 static int gicv5_its_alloc_devtab_two_level(struct gicv5_its_chip_data *its, in gicv5_its_alloc_devtab_two_level()
631 l1_bits = device_id_bits - l2_bits; in gicv5_its_alloc_devtab_two_level()
634 * With 2-level device table support it is highly unlikely in gicv5_its_alloc_devtab_two_level()
637 * deviceID space if we encounter such set-up. in gicv5_its_alloc_devtab_two_level()
639 * behind level 2 size selection to reduce level-1 deviceID bits. in gicv5_its_alloc_devtab_two_level()
652 return -ENOMEM; in gicv5_its_alloc_devtab_two_level()
657 return -ENOMEM; in gicv5_its_alloc_devtab_two_level()
663 gicv5_its_dcache_clean(its, l1devtab, l1_sz); in gicv5_its_alloc_devtab_two_level()
669 its_writel_relaxed(its, cfgr, GICV5_ITS_DT_CFGR); in gicv5_its_alloc_devtab_two_level()
672 its_writeq_relaxed(its, baser, GICV5_ITS_DT_BASER); in gicv5_its_alloc_devtab_two_level()
674 its->devtab_cfgr.cfgr = cfgr; in gicv5_its_alloc_devtab_two_level()
675 its->devtab_cfgr.l2.l1devtab = l1devtab; in gicv5_its_alloc_devtab_two_level()
676 its->devtab_cfgr.l2.l2ptrs = l2ptrs; in gicv5_its_alloc_devtab_two_level()
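To make the split above concrete: the lower l2_bits of the DeviceID index a leaf table and the remaining device_id_bits - l2_bits index the L1 table, each entry being an 8-byte descriptor. A small worked sketch of the resulting L1 size (the bit counts in the comment are illustrative, not values read from IDR1):

    #include <linux/bits.h>
    #include <linux/types.h>

    /* Illustrative sizing of the L1 table in a two-level device table */
    static size_t demo_devtab_l1_bytes(u32 device_id_bits, u32 l2_bits)
    {
            u32 l1_bits = device_id_bits - l2_bits;

            /* e.g. 16 DeviceID bits, 12-bit L2 tables -> 16 L1 entries = 128 bytes */
            return BIT(l1_bits) * sizeof(__le64);
    }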
682 * Initialise the device table as either 1- or 2-level depending on what is
685 static int gicv5_its_init_devtab(struct gicv5_its_chip_data *its) in gicv5_its_init_devtab()
691 idr1 = its_readl_relaxed(its, GICV5_ITS_IDR1); in gicv5_its_init_devtab()
697 return gicv5_its_alloc_devtab_two_level(its, device_id_bits, in gicv5_its_init_devtab()
700 return gicv5_its_alloc_devtab_linear(its, device_id_bits); in gicv5_its_init_devtab()
703 static void gicv5_its_deinit_devtab(struct gicv5_its_chip_data *its) in gicv5_its_deinit_devtab()
705 u8 str = devtab_cfgr_field(its, STRUCTURE); in gicv5_its_deinit_devtab()
708 kfree(its->devtab_cfgr.linear.devtab); in gicv5_its_deinit_devtab()
710 kfree(its->devtab_cfgr.l2.l1devtab); in gicv5_its_deinit_devtab()
711 kfree(its->devtab_cfgr.l2.l2ptrs); in gicv5_its_deinit_devtab()
718 u64 addr = its_dev->its_trans_phys_base; in gicv5_its_compose_msi_msg()
720 msg->data = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq); in gicv5_its_compose_msi_msg()
725 .name = "GICv5-ITS-MSI",
735 static struct gicv5_its_dev *gicv5_its_find_device(struct gicv5_its_chip_data *its, in gicv5_its_find_device()
738 struct gicv5_its_dev *dev = xa_load(&its->its_devices, device_id); in gicv5_its_find_device()
740 return dev ? dev : ERR_PTR(-ENODEV); in gicv5_its_find_device()
743 static struct gicv5_its_dev *gicv5_its_alloc_device(struct gicv5_its_chip_data *its, int nvec, in gicv5_its_alloc_device()
750 its_dev = gicv5_its_find_device(its, dev_id); in gicv5_its_alloc_device()
755 return ERR_PTR(-EBUSY); in gicv5_its_alloc_device()
760 return ERR_PTR(-ENOMEM); in gicv5_its_alloc_device()
762 its_dev->device_id = dev_id; in gicv5_its_alloc_device()
763 its_dev->num_events = nvec; in gicv5_its_alloc_device()
765 ret = gicv5_its_device_register(its, its_dev); in gicv5_its_alloc_device()
771 gicv5_its_device_cache_inv(its, its_dev); in gicv5_its_alloc_device()
773 its_dev->its_node = its; in gicv5_its_alloc_device()
775 its_dev->event_map = (unsigned long *)bitmap_zalloc(its_dev->num_events, GFP_KERNEL); in gicv5_its_alloc_device()
776 if (!its_dev->event_map) { in gicv5_its_alloc_device()
777 ret = -ENOMEM; in gicv5_its_alloc_device()
781 entry = xa_store(&its->its_devices, dev_id, its_dev, GFP_KERNEL); in gicv5_its_alloc_device()
790 bitmap_free(its_dev->event_map); in gicv5_its_alloc_device()
792 gicv5_its_device_unregister(its, its_dev); in gicv5_its_alloc_device()
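Per-device state is keyed by DeviceID in an xarray: xa_init() at init time, xa_load() on look-up, xa_store() when a device is first allocated and xa_erase() at teardown, as the surrounding matches show. A stripped-down sketch of that get-or-create pattern (struct, locking and error handling simplified; not the driver's actual helper):

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/xarray.h>

    static DEFINE_XARRAY(demo_devices);

    struct demo_dev {
            u32 device_id;
    };

    static struct demo_dev *demo_get_or_create(u32 device_id)
    {
            struct demo_dev *dev = xa_load(&demo_devices, device_id);
            void *entry;

            if (dev)
                    return dev;

            dev = kzalloc(sizeof(*dev), GFP_KERNEL);
            if (!dev)
                    return ERR_PTR(-ENOMEM);

            dev->device_id = device_id;

            /* xa_store() returns the previous entry, or an xa_err()-encoded error */
            entry = xa_store(&demo_devices, device_id, dev, GFP_KERNEL);
            if (xa_is_err(entry)) {
                    kfree(dev);
                    return ERR_PTR(xa_err(entry));
            }

            return dev;
    }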
801 u32 dev_id = info->scratchpad[0].ul; in gicv5_its_msi_prepare()
803 struct gicv5_its_chip_data *its; in gicv5_its_msi_prepare()
807 its = msi_info->data; in gicv5_its_msi_prepare()
809 guard(mutex)(&its->dev_alloc_lock); in gicv5_its_msi_prepare()
811 its_dev = gicv5_its_alloc_device(its, nvec, dev_id); in gicv5_its_msi_prepare()
815 its_dev->its_trans_phys_base = info->scratchpad[1].ul; in gicv5_its_msi_prepare()
816 info->scratchpad[0].ptr = its_dev; in gicv5_its_msi_prepare()
823 struct gicv5_its_dev *its_dev = info->scratchpad[0].ptr; in gicv5_its_msi_teardown()
825 struct gicv5_its_chip_data *its; in gicv5_its_msi_teardown()
828 its = msi_info->data; in gicv5_its_msi_teardown()
830 guard(mutex)(&its->dev_alloc_lock); in gicv5_its_msi_teardown()
832 if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map, its_dev->num_events))) in gicv5_its_msi_teardown()
835 xa_erase(&its->its_devices, its_dev->device_id); in gicv5_its_msi_teardown()
836 bitmap_free(its_dev->event_map); in gicv5_its_msi_teardown()
837 gicv5_its_device_unregister(its, its_dev); in gicv5_its_msi_teardown()
848 struct gicv5_its_chip_data *its = its_dev->its_node; in gicv5_its_map_event()
855 return -EEXIST; in gicv5_its_map_event()
860 its_write_table_entry(its, itte, itt_entry); in gicv5_its_map_event()
862 gicv5_its_itt_cache_inv(its, its_dev->device_id, event_id); in gicv5_its_map_event()
869 struct gicv5_its_chip_data *its = its_dev->its_node; in gicv5_its_unmap_event()
878 its_write_table_entry(its, itte, itte_val); in gicv5_its_unmap_event()
880 gicv5_its_itt_cache_inv(its, its_dev->device_id, event_id); in gicv5_its_unmap_event()
888 if (!(info->flags & MSI_ALLOC_FLAGS_FIXED_MSG_DATA)) { in gicv5_its_alloc_eventid()
889 event_id_base = bitmap_find_free_region(its_dev->event_map, in gicv5_its_alloc_eventid()
890 its_dev->num_events, in gicv5_its_alloc_eventid()
900 return -EINVAL; in gicv5_its_alloc_eventid()
902 event_id_base = info->hwirq; in gicv5_its_alloc_eventid()
904 if (event_id_base >= its_dev->num_events) { in gicv5_its_alloc_eventid()
907 return -EINVAL; in gicv5_its_alloc_eventid()
910 if (test_and_set_bit(event_id_base, its_dev->event_map)) { in gicv5_its_alloc_eventid()
912 return -EINVAL; in gicv5_its_alloc_eventid()
925 bitmap_release_region(its_dev->event_map, event_id_base, in gicv5_its_free_eventid()
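EventIDs are handed out from a per-device bitmap sized to num_events: bitmap_find_free_region() allocates a naturally aligned block (so multi-MSI gets contiguous, power-of-two EventID ranges), test_and_set_bit() claims a caller-fixed ID, and bitmap_release_region() gives blocks back. A minimal allocator sketch along those lines (illustrative names and error handling, not the driver's exact code):

    #include <linux/bitmap.h>
    #include <linux/errno.h>

    /* Allocate 1 << order contiguous event IDs; returns the base ID or -ENOSPC */
    static int demo_alloc_event_ids(unsigned long *map, unsigned int num_events,
                                    unsigned int order)
    {
            int base = bitmap_find_free_region(map, num_events, order);

            return base < 0 ? -ENOSPC : base;
    }

    static void demo_free_event_ids(unsigned long *map, unsigned int base,
                                    unsigned int order)
    {
            bitmap_release_region(map, base, order);
    }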
939 its_dev = info->scratchpad[0].ptr; in gicv5_its_irq_domain_alloc()
945 ret = iommu_dma_prepare_msi(info->desc, its_dev->its_trans_phys_base); in gicv5_its_irq_domain_alloc()
949 device_id = its_dev->device_id; in gicv5_its_irq_domain_alloc()
991 struct gicv5_its_chip_data *its; in gicv5_its_irq_domain_free()
997 its = its_dev->its_node; in gicv5_its_irq_domain_free()
999 event_id_base = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq); in gicv5_its_irq_domain_free()
1001 bitmap_release_region(its_dev->event_map, event_id_base, in gicv5_its_irq_domain_free()
1008 gicv5_free_lpi(d->parent_data->hwirq); in gicv5_its_irq_domain_free()
1013 gicv5_its_syncr(its, its_dev); in gicv5_its_irq_domain_free()
1024 event_id = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq); in gicv5_its_irq_domain_activate()
1025 lpi = d->parent_data->hwirq; in gicv5_its_irq_domain_activate()
1036 event_id = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq); in gicv5_its_irq_domain_deactivate()
1049 static int gicv5_its_write_cr0(struct gicv5_its_chip_data *its, bool enable) in gicv5_its_write_cr0()
1053 its_writel_relaxed(its, cr0, GICV5_ITS_CR0); in gicv5_its_write_cr0()
1054 return gicv5_wait_for_op_atomic(its->its_base, GICV5_ITS_CR0, in gicv5_its_write_cr0()
1058 static int gicv5_its_enable(struct gicv5_its_chip_data *its) in gicv5_its_enable()
1060 return gicv5_its_write_cr0(its, true); in gicv5_its_enable()
1063 static int gicv5_its_disable(struct gicv5_its_chip_data *its) in gicv5_its_disable()
1065 return gicv5_its_write_cr0(its, false); in gicv5_its_disable()
1079 pr_info("ITS %s enabled using %s device table device_id_bits %u\n", in gicv5_its_print_info()
1080 fwnode_get_name(its_node->fwnode), in gicv5_its_print_info()
1081 devtab_linear ? "linear" : "2-level", in gicv5_its_print_info()
1085 static int gicv5_its_init_domain(struct gicv5_its_chip_data *its, struct irq_domain *parent) in gicv5_its_init_domain()
1088 .fwnode = its->fwnode, in gicv5_its_init_domain()
1090 .domain_flags = its->msi_domain_flags, in gicv5_its_init_domain()
1097 return -ENOMEM; in gicv5_its_init_domain()
1099 info->ops = &gicv5_its_msi_domain_ops; in gicv5_its_init_domain()
1100 info->data = its; in gicv5_its_init_domain()
1105 return -ENOMEM; in gicv5_its_init_domain()
1122 return -ENOMEM; in gicv5_its_init_bases()
1124 mutex_init(&its_node->dev_alloc_lock); in gicv5_its_init_bases()
1125 xa_init(&its_node->its_devices); in gicv5_its_init_bases()
1126 its_node->fwnode = handle; in gicv5_its_init_bases()
1127 its_node->its_base = its_base; in gicv5_its_init_bases()
1128 its_node->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI | in gicv5_its_init_bases()
1133 if (WARN(enabled, "ITS %s enabled, disabling it before proceeding\n", np->full_name)) { in gicv5_its_init_bases()
1139 if (of_property_read_bool(np, "dma-noncoherent")) { in gicv5_its_init_bases()
1141 * A non-coherent ITS implies that some cache levels cannot be in gicv5_its_init_bases()
1142 * used coherently by the cores and GIC. Our only option is to mark in gicv5_its_init_bases()
1143 * memory attributes for the GIC as non-cacheable; by default, in gicv5_its_init_bases()
1144 * non-cacheable memory attributes imply outer-shareable in gicv5_its_init_bases()
1151 its_node->flags |= ITS_FLAGS_NON_COHERENT; in gicv5_its_init_bases()
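The flag set above is what gates the cache maintenance in gicv5_its_dcache_clean() near the top of this listing: on a non-coherent ITS, every table update must be cleaned to the point of coherency before the ITS can be expected to observe it. A sketch of that publish pattern; dcache_clean_inval_poc() is one way to do the clean on arm64, and the matched lines do not show which maintenance helper the driver actually uses:

    #include <asm/byteorder.h>
    #include <asm/cacheflush.h>

    /* Illustrative: publish a table entry to a possibly non-coherent ITS */
    static void demo_publish_entry(struct gicv5_its_chip_data *its,
                                   __le64 *entry, u64 val)
    {
            *entry = cpu_to_le64(val);

            if (its->flags & ITS_FLAGS_NON_COHERENT)
                    dcache_clean_inval_poc((unsigned long)entry,
                                           (unsigned long)entry + sizeof(*entry));
    }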
1192 idx = of_property_match_string(node, "reg-names", "ns-config"); in gicv5_its_init()
1194 pr_err("%pOF: ns-config reg-name not present\n", node); in gicv5_its_init()
1195 return -ENODEV; in gicv5_its_init()
1221 if (!of_device_is_compatible(np, "arm,gic-v5-its")) in gicv5_its_of_probe()
1225 pr_err("Failed to init ITS %s\n", np->full_name); in gicv5_its_of_probe()