// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved.
 */

#define pr_fmt(fmt) "GICv5 ITS: " fmt

#include <linux/bitmap.h>
#include <linux/iommu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v5.h>
#include <linux/irqchip/irq-msi-lib.h>

#include "irq-gic-its-msi-parent.h"

#define ITS_FLAGS_NON_COHERENT		BIT(0)

struct gicv5_its_chip_data {
	struct xarray			its_devices;
	struct mutex			dev_alloc_lock;
	struct fwnode_handle		*fwnode;
	struct gicv5_its_devtab_cfg	devtab_cfgr;
	void __iomem			*its_base;
	u32				flags;
	unsigned int			msi_domain_flags;
};

struct gicv5_its_dev {
	struct gicv5_its_chip_data	*its_node;
	struct gicv5_its_itt_cfg	itt_cfg;
	unsigned long			*event_map;
	u32				device_id;
	u32				num_events;
	phys_addr_t			its_trans_phys_base;
};

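/* Accessors for the registers in the ITS configuration frame. */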
static u32 its_readl_relaxed(struct gicv5_its_chip_data *its_node, const u32 reg_offset)
{
	return readl_relaxed(its_node->its_base + reg_offset);
}

static void its_writel_relaxed(struct gicv5_its_chip_data *its_node, const u32 val,
			       const u32 reg_offset)
{
	writel_relaxed(val, its_node->its_base + reg_offset);
}

static void its_writeq_relaxed(struct gicv5_its_chip_data *its_node, const u64 val,
			       const u32 reg_offset)
{
	writeq_relaxed(val, its_node->its_base + reg_offset);
}

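/*
 * Make a table update visible to the ITS: clean the cachelines to the PoC on
 * non-coherent implementations, otherwise a DSB is sufficient to order the
 * preceding stores.
 */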
static void gicv5_its_dcache_clean(struct gicv5_its_chip_data *its, void *start,
				   size_t sz)
{
	void *end = start + sz;

	if (its->flags & ITS_FLAGS_NON_COHERENT)
		dcache_clean_inval_poc((unsigned long)start, (unsigned long)end);
	else
		dsb(ishst);
}

static void its_write_table_entry(struct gicv5_its_chip_data *its, __le64 *entry,
				  u64 val)
{
	WRITE_ONCE(*entry, cpu_to_le64(val));
	gicv5_its_dcache_clean(its, entry, sizeof(*entry));
}

#define devtab_cfgr_field(its, f) \
	FIELD_GET(GICV5_ITS_DT_CFGR_##f, (its)->devtab_cfgr.cfgr)

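/* Poll ITS_STATUSR.IDLE to wait for a previously requested invalidation to complete. */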
static int gicv5_its_cache_sync(struct gicv5_its_chip_data *its)
{
	return gicv5_wait_for_op_atomic(its->its_base, GICV5_ITS_STATUSR,
					GICV5_ITS_STATUSR_IDLE, NULL);
}

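/*
 * Issue a sync request for the given DeviceID and wait for ITS_SYNC_STATUSR
 * to report idle.
 */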
static void gicv5_its_syncr(struct gicv5_its_chip_data *its,
			    struct gicv5_its_dev *its_dev)
{
	u64 syncr;

	syncr = FIELD_PREP(GICV5_ITS_SYNCR_SYNC, 1) |
		FIELD_PREP(GICV5_ITS_SYNCR_DEVICEID, its_dev->device_id);

	its_writeq_relaxed(its, syncr, GICV5_ITS_SYNCR);

	gicv5_wait_for_op(its->its_base, GICV5_ITS_SYNC_STATUSR, GICV5_ITS_SYNC_STATUSR_IDLE);
}

/* Number of bits required for each L2 {device/interrupt translation} table size */
#define ITS_L2SZ_64K_L2_BITS	13
#define ITS_L2SZ_16K_L2_BITS	11
#define ITS_L2SZ_4K_L2_BITS	9

static unsigned int gicv5_its_l2sz_to_l2_bits(unsigned int sz)
{
	switch (sz) {
	case GICV5_ITS_DT_ITT_CFGR_L2SZ_64k:
		return ITS_L2SZ_64K_L2_BITS;
	case GICV5_ITS_DT_ITT_CFGR_L2SZ_16k:
		return ITS_L2SZ_16K_L2_BITS;
	case GICV5_ITS_DT_ITT_CFGR_L2SZ_4k:
	default:
		return ITS_L2SZ_4K_L2_BITS;
	}
}

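/*
 * Invalidate any ITS-cached translation for the given DeviceID/EventID pair
 * and wait for the invalidation to take effect.
 */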
static int gicv5_its_itt_cache_inv(struct gicv5_its_chip_data *its, u32 device_id,
				   u16 event_id)
{
	u32 eventr, eidr;
	u64 didr;

	didr = FIELD_PREP(GICV5_ITS_DIDR_DEVICEID, device_id);
	eidr = FIELD_PREP(GICV5_ITS_EIDR_EVENTID, event_id);
	eventr = FIELD_PREP(GICV5_ITS_INV_EVENTR_I, 0x1);

	its_writeq_relaxed(its, didr, GICV5_ITS_DIDR);
	its_writel_relaxed(its, eidr, GICV5_ITS_EIDR);
	its_writel_relaxed(its, eventr, GICV5_ITS_INV_EVENTR);

	return gicv5_its_cache_sync(its);
}

static void gicv5_its_free_itt_linear(struct gicv5_its_dev *its_dev)
{
	kfree(its_dev->itt_cfg.linear.itt);
}

static void gicv5_its_free_itt_two_level(struct gicv5_its_dev *its_dev)
{
	unsigned int i, num_ents = its_dev->itt_cfg.l2.num_l1_ents;

	for (i = 0; i < num_ents; i++)
		kfree(its_dev->itt_cfg.l2.l2ptrs[i]);

	kfree(its_dev->itt_cfg.l2.l2ptrs);
	kfree(its_dev->itt_cfg.l2.l1itt);
}

static void gicv5_its_free_itt(struct gicv5_its_dev *its_dev)
{
	if (!its_dev->itt_cfg.l2itt)
		gicv5_its_free_itt_linear(its_dev);
	else
		gicv5_its_free_itt_two_level(its_dev);
}

static int gicv5_its_create_itt_linear(struct gicv5_its_chip_data *its,
				       struct gicv5_its_dev *its_dev,
				       unsigned int event_id_bits)
{
	unsigned int num_ents = BIT(event_id_bits);
	__le64 *itt;

	itt = kcalloc(num_ents, sizeof(*itt), GFP_KERNEL);
	if (!itt)
		return -ENOMEM;

	its_dev->itt_cfg.linear.itt = itt;
	its_dev->itt_cfg.linear.num_ents = num_ents;
	its_dev->itt_cfg.l2itt = false;
	its_dev->itt_cfg.event_id_bits = event_id_bits;

	gicv5_its_dcache_clean(its, itt, num_ents * sizeof(*itt));

	return 0;
}

/*
 * Allocate a two-level ITT. All ITT entries are allocated in one go, unlike
 * with the device table. Span may be used to limit the second level table
 * size, where possible.
 */
static int gicv5_its_create_itt_two_level(struct gicv5_its_chip_data *its,
					  struct gicv5_its_dev *its_dev,
					  unsigned int event_id_bits,
					  unsigned int itt_l2sz,
					  unsigned int num_events)
{
	unsigned int l1_bits, l2_bits, span, events_per_l2_table;
	unsigned int i, complete_tables, final_span, num_ents;
	__le64 *itt_l1, *itt_l2, **l2ptrs;
	int ret;
	u64 val;

	ret = gicv5_its_l2sz_to_l2_bits(itt_l2sz);
	if (ret >= event_id_bits) {
		pr_debug("Incorrect l2sz (0x%x) for %u EventID bits. Cannot allocate ITT\n",
			 itt_l2sz, event_id_bits);
		return -EINVAL;
	}

	l2_bits = ret;

	l1_bits = event_id_bits - l2_bits;

	num_ents = BIT(l1_bits);

	itt_l1 = kcalloc(num_ents, sizeof(*itt_l1), GFP_KERNEL);
	if (!itt_l1)
		return -ENOMEM;

	l2ptrs = kcalloc(num_ents, sizeof(*l2ptrs), GFP_KERNEL);
	if (!l2ptrs) {
		kfree(itt_l1);
		return -ENOMEM;
	}

	its_dev->itt_cfg.l2.l2ptrs = l2ptrs;

	its_dev->itt_cfg.l2.l2sz = itt_l2sz;
	its_dev->itt_cfg.l2.l1itt = itt_l1;
	its_dev->itt_cfg.l2.num_l1_ents = num_ents;
	its_dev->itt_cfg.l2itt = true;
	its_dev->itt_cfg.event_id_bits = event_id_bits;

	/*
	 * Need to determine how many entries there are per L2 - this is based
	 * on the number of bits in the table.
	 */
	events_per_l2_table = BIT(l2_bits);
	complete_tables = num_events / events_per_l2_table;
	final_span = order_base_2(num_events % events_per_l2_table);

	for (i = 0; i < num_ents; i++) {
		size_t l2sz;

		span = i == complete_tables ? final_span : l2_bits;

		itt_l2 = kcalloc(BIT(span), sizeof(*itt_l2), GFP_KERNEL);
		if (!itt_l2) {
			ret = -ENOMEM;
			goto out_free;
		}

		its_dev->itt_cfg.l2.l2ptrs[i] = itt_l2;

		l2sz = BIT(span) * sizeof(*itt_l2);

		gicv5_its_dcache_clean(its, itt_l2, l2sz);

		val = (virt_to_phys(itt_l2) & GICV5_ITTL1E_L2_ADDR_MASK) |
		      FIELD_PREP(GICV5_ITTL1E_SPAN, span) |
		      FIELD_PREP(GICV5_ITTL1E_VALID, 0x1);

		WRITE_ONCE(itt_l1[i], cpu_to_le64(val));
	}

	gicv5_its_dcache_clean(its, itt_l1, num_ents * sizeof(*itt_l1));

	return 0;

out_free:
	/* i is unsigned: count down explicitly instead of testing i >= 0. */
	while (i > 0)
		kfree(its_dev->itt_cfg.l2.l2ptrs[--i]);

	kfree(its_dev->itt_cfg.l2.l2ptrs);
	kfree(itt_l1);
	return ret;
}


/*
 * Check whether the device table or the ITT supports a two-level layout
 * and, if so, based on the number of id_bits requested, determine whether
 * a two-level table is actually required.
 *
 * Return true and set the L2 size value if a two-level table is deemed
 * necessary.
 */
static bool gicv5_its_l2sz_two_level(bool devtab, u32 its_idr1, u8 id_bits, u8 *sz)
{
	unsigned int l2_bits, l2_sz;

	if (devtab && !FIELD_GET(GICV5_ITS_IDR1_DT_LEVELS, its_idr1))
		return false;

	if (!devtab && !FIELD_GET(GICV5_ITS_IDR1_ITT_LEVELS, its_idr1))
		return false;

	/*
	 * Pick an L2 size that matches the pagesize; if a match
	 * is not found, go for the smallest supported l2 size granule.
	 *
	 * This ensures that we will always be able to allocate
	 * contiguous memory at L2.
	 */
	switch (PAGE_SIZE) {
	case SZ_64K:
		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(its_idr1)) {
			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_64k;
			break;
		}
		fallthrough;
	case SZ_4K:
		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(its_idr1)) {
			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k;
			break;
		}
		fallthrough;
	case SZ_16K:
		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_16KB(its_idr1)) {
			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_16k;
			break;
		}
		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(its_idr1)) {
			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k;
			break;
		}
		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(its_idr1)) {
			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_64k;
			break;
		}

		l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k;
		break;
	}

	l2_bits = gicv5_its_l2sz_to_l2_bits(l2_sz);

	if (l2_bits > id_bits)
		return false;

	*sz = l2_sz;

	return true;
}

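/*
 * Return a reference to the ITT entry for a given EventID: index the linear
 * ITT directly, or split the EventID into L1/L2 indices for a two-level ITT.
 */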
static __le64 *gicv5_its_device_get_itte_ref(struct gicv5_its_dev *its_dev,
					     u16 event_id)
{
	unsigned int l1_idx, l2_idx, l2_bits;
	__le64 *l2_itt;

	if (!its_dev->itt_cfg.l2itt) {
		__le64 *itt = its_dev->itt_cfg.linear.itt;

		return &itt[event_id];
	}

	l2_bits = gicv5_its_l2sz_to_l2_bits(its_dev->itt_cfg.l2.l2sz);
	l1_idx = event_id >> l2_bits;
	l2_idx = event_id & GENMASK(l2_bits - 1, 0);
	l2_itt = its_dev->itt_cfg.l2.l2ptrs[l1_idx];

	return &l2_itt[l2_idx];
}

static int gicv5_its_device_cache_inv(struct gicv5_its_chip_data *its,
				      struct gicv5_its_dev *its_dev)
{
	u32 devicer;
	u64 didr;

	didr = FIELD_PREP(GICV5_ITS_DIDR_DEVICEID, its_dev->device_id);
	devicer = FIELD_PREP(GICV5_ITS_INV_DEVICER_I, 0x1) |
		  FIELD_PREP(GICV5_ITS_INV_DEVICER_EVENTID_BITS,
			     its_dev->itt_cfg.event_id_bits) |
		  FIELD_PREP(GICV5_ITS_INV_DEVICER_L1, 0x0);
	its_writeq_relaxed(its, didr, GICV5_ITS_DIDR);
	its_writel_relaxed(its, devicer, GICV5_ITS_INV_DEVICER);

	return gicv5_its_cache_sync(its);
}

/*
 * Allocate a level 2 device table entry, update L1 parent to reference it.
 * Only used for 2-level device tables, and it is called on demand.
 */
static int gicv5_its_alloc_l2_devtab(struct gicv5_its_chip_data *its,
				     unsigned int l1_index)
{
	__le64 *l2devtab, *l1devtab = its->devtab_cfgr.l2.l1devtab;
	u8 span, l2sz, l2_bits;
	u64 l1dte;

	if (FIELD_GET(GICV5_DTL1E_VALID, le64_to_cpu(l1devtab[l1_index])))
		return 0;

	span = FIELD_GET(GICV5_DTL1E_SPAN, le64_to_cpu(l1devtab[l1_index]));
	l2sz = devtab_cfgr_field(its, L2SZ);

	l2_bits = gicv5_its_l2sz_to_l2_bits(l2sz);

	/*
	 * Span allows us to create a smaller L2 device table.
	 * If it is too large, use the number of allowed L2 bits.
	 */
	if (span > l2_bits)
		span = l2_bits;

	l2devtab = kcalloc(BIT(span), sizeof(*l2devtab), GFP_KERNEL);
	if (!l2devtab)
		return -ENOMEM;

	its->devtab_cfgr.l2.l2ptrs[l1_index] = l2devtab;

	l1dte = FIELD_PREP(GICV5_DTL1E_SPAN, span) |
		(virt_to_phys(l2devtab) & GICV5_DTL1E_L2_ADDR_MASK) |
		FIELD_PREP(GICV5_DTL1E_VALID, 0x1);
	its_write_table_entry(its, &l1devtab[l1_index], l1dte);

	return 0;
}

static __le64 *gicv5_its_devtab_get_dte_ref(struct gicv5_its_chip_data *its,
					    u32 device_id, bool alloc)
{
	u8 str = devtab_cfgr_field(its, STRUCTURE);
	unsigned int l2sz, l2_bits, l1_idx, l2_idx;
	__le64 *l2devtab;
	int ret;

	if (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) {
		l2devtab = its->devtab_cfgr.linear.devtab;
		return &l2devtab[device_id];
	}

	l2sz = devtab_cfgr_field(its, L2SZ);
	l2_bits = gicv5_its_l2sz_to_l2_bits(l2sz);
	l1_idx = device_id >> l2_bits;
	l2_idx = device_id & GENMASK(l2_bits - 1, 0);

	if (alloc) {
		/*
		 * Allocate a new L2 device table here before
		 * continuing. We make the assumption that the span in
		 * the L1 table has been set correctly, and blindly use
		 * that value.
		 */
		ret = gicv5_its_alloc_l2_devtab(its, l1_idx);
		if (ret)
			return NULL;
	}

	l2devtab = its->devtab_cfgr.l2.l2ptrs[l1_idx];
	return &l2devtab[l2_idx];
}

/*
 * Register a new device in the device table. Allocate an ITT and
 * program the L2DTE entry according to the ITT structure that
 * was chosen.
 */
static int gicv5_its_device_register(struct gicv5_its_chip_data *its,
				     struct gicv5_its_dev *its_dev)
{
	u8 event_id_bits, device_id_bits, itt_struct, itt_l2sz;
	phys_addr_t itt_phys_base;
	bool two_level_itt;
	u32 idr1, idr2;
	__le64 *dte;
	u64 val;
	int ret;

	device_id_bits = devtab_cfgr_field(its, DEVICEID_BITS);

	if (its_dev->device_id >= BIT(device_id_bits)) {
		pr_err("Supplied DeviceID (%u) outside of Device Table range (%u)!",
		       its_dev->device_id, (u32)GENMASK(device_id_bits - 1, 0));
		return -EINVAL;
	}

	dte = gicv5_its_devtab_get_dte_ref(its, its_dev->device_id, true);
	if (!dte)
		return -ENOMEM;

	if (FIELD_GET(GICV5_DTL2E_VALID, le64_to_cpu(*dte)))
		return -EBUSY;

	/*
	 * Determine how many bits we need, validate those against the max.
	 * Based on these, determine if we should go for a 1- or 2-level ITT.
	 */
	event_id_bits = order_base_2(its_dev->num_events);

	idr2 = its_readl_relaxed(its, GICV5_ITS_IDR2);

	if (event_id_bits > FIELD_GET(GICV5_ITS_IDR2_EVENTID_BITS, idr2)) {
		pr_err("Required EventID bits (%u) larger than supported bits (%u)!",
		       event_id_bits,
		       (u8)FIELD_GET(GICV5_ITS_IDR2_EVENTID_BITS, idr2));
		return -EINVAL;
	}

	idr1 = its_readl_relaxed(its, GICV5_ITS_IDR1);

	/*
	 * L2 ITT size is programmed into the L2DTE regardless of
	 * whether a two-level or linear ITT is built, init it.
	 */
	itt_l2sz = 0;

	two_level_itt = gicv5_its_l2sz_two_level(false, idr1, event_id_bits,
						 &itt_l2sz);
	if (two_level_itt)
		ret = gicv5_its_create_itt_two_level(its, its_dev, event_id_bits,
						     itt_l2sz,
						     its_dev->num_events);
	else
		ret = gicv5_its_create_itt_linear(its, its_dev, event_id_bits);
	if (ret)
		return ret;

	itt_phys_base = two_level_itt ? virt_to_phys(its_dev->itt_cfg.l2.l1itt) :
					virt_to_phys(its_dev->itt_cfg.linear.itt);

	itt_struct = two_level_itt ? GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL :
				     GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR;

	val = FIELD_PREP(GICV5_DTL2E_EVENT_ID_BITS, event_id_bits) |
	      FIELD_PREP(GICV5_DTL2E_ITT_STRUCTURE, itt_struct) |
	      (itt_phys_base & GICV5_DTL2E_ITT_ADDR_MASK) |
	      FIELD_PREP(GICV5_DTL2E_ITT_L2SZ, itt_l2sz) |
	      FIELD_PREP(GICV5_DTL2E_VALID, 0x1);

	its_write_table_entry(its, dte, val);

	ret = gicv5_its_device_cache_inv(its, its_dev);
	if (ret) {
		its_write_table_entry(its, dte, 0);
		gicv5_its_free_itt(its_dev);
		return ret;
	}

	return 0;
}

/*
 * Unregister a device in the device table. Lookup the device by ID, free the
 * corresponding ITT, mark the device as invalid in the device table.
 */
static int gicv5_its_device_unregister(struct gicv5_its_chip_data *its,
				       struct gicv5_its_dev *its_dev)
{
	__le64 *dte;

	dte = gicv5_its_devtab_get_dte_ref(its, its_dev->device_id, false);

	if (!FIELD_GET(GICV5_DTL2E_VALID, le64_to_cpu(*dte))) {
		pr_debug("Device table entry for DeviceID 0x%x is not valid. Nothing to clean up!",
			 its_dev->device_id);
		return -EINVAL;
	}

	/* Zero everything - make it clear that this is an invalid entry */
	its_write_table_entry(its, dte, 0);

	gicv5_its_free_itt(its_dev);

	return gicv5_its_device_cache_inv(its, its_dev);
}

/*
 * Allocate a 1-level device table. All entries are allocated, but marked
 * invalid.
 */
static int gicv5_its_alloc_devtab_linear(struct gicv5_its_chip_data *its,
					 u8 device_id_bits)
{
	__le64 *devtab;
	size_t sz;
	u64 baser;
	u32 cfgr;

	/*
	 * We expect a GICv5 implementation requiring a large number of
	 * deviceID bits to support a 2-level device table. If that's not
	 * the case, cap the number of deviceIDs supported according to the
	 * kmalloc limits so that the system can chug along with a linear
	 * device table.
	 */
	sz = BIT_ULL(device_id_bits) * sizeof(*devtab);
	if (sz > KMALLOC_MAX_SIZE) {
		u8 device_id_cap = ilog2(KMALLOC_MAX_SIZE/sizeof(*devtab));

		pr_warn("Limiting device ID bits from %u to %u\n",
			device_id_bits, device_id_cap);
		device_id_bits = device_id_cap;
		/* Keep the size used for cache maintenance in sync with the capped allocation. */
		sz = BIT_ULL(device_id_bits) * sizeof(*devtab);
	}

	devtab = kcalloc(BIT(device_id_bits), sizeof(*devtab), GFP_KERNEL);
	if (!devtab)
		return -ENOMEM;

	gicv5_its_dcache_clean(its, devtab, sz);

	cfgr = FIELD_PREP(GICV5_ITS_DT_CFGR_STRUCTURE,
			  GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) |
	       FIELD_PREP(GICV5_ITS_DT_CFGR_L2SZ, 0) |
	       FIELD_PREP(GICV5_ITS_DT_CFGR_DEVICEID_BITS, device_id_bits);
	its_writel_relaxed(its, cfgr, GICV5_ITS_DT_CFGR);

	baser = virt_to_phys(devtab) & GICV5_ITS_DT_BASER_ADDR_MASK;
	its_writeq_relaxed(its, baser, GICV5_ITS_DT_BASER);

	its->devtab_cfgr.cfgr = cfgr;
	its->devtab_cfgr.linear.devtab = devtab;

	return 0;
}

/*
 * Allocate a 2-level device table. L2 entries are not allocated,
 * they are allocated on-demand.
 */
static int gicv5_its_alloc_devtab_two_level(struct gicv5_its_chip_data *its,
					    u8 device_id_bits,
					    u8 devtab_l2sz)
{
	unsigned int l1_bits, l2_bits, i;
	__le64 *l1devtab, **l2ptrs;
	size_t l1_sz;
	u64 baser;
	u32 cfgr;

	l2_bits = gicv5_its_l2sz_to_l2_bits(devtab_l2sz);

	l1_bits = device_id_bits - l2_bits;
	l1_sz = BIT(l1_bits) * sizeof(*l1devtab);
	/*
	 * With 2-level device table support it is highly unlikely
	 * that we are not able to allocate the required amount of
	 * device table memory to cover deviceID space; cap the
	 * deviceID space if we encounter such set-up.
	 * If this ever becomes a problem we could revisit the policy
	 * behind level 2 size selection to reduce level-1 deviceID bits.
	 */
	if (l1_sz > KMALLOC_MAX_SIZE) {
		l1_bits = ilog2(KMALLOC_MAX_SIZE/sizeof(*l1devtab));

		pr_warn("Limiting device ID bits from %u to %u\n",
			device_id_bits, l1_bits + l2_bits);
		device_id_bits = l1_bits + l2_bits;
		l1_sz = KMALLOC_MAX_SIZE;
	}

	l1devtab = kcalloc(BIT(l1_bits), sizeof(*l1devtab), GFP_KERNEL);
	if (!l1devtab)
		return -ENOMEM;

	l2ptrs = kcalloc(BIT(l1_bits), sizeof(*l2ptrs), GFP_KERNEL);
	if (!l2ptrs) {
		kfree(l1devtab);
		return -ENOMEM;
	}

	for (i = 0; i < BIT(l1_bits); i++)
		l1devtab[i] = cpu_to_le64(FIELD_PREP(GICV5_DTL1E_SPAN, l2_bits));

	gicv5_its_dcache_clean(its, l1devtab, l1_sz);

	cfgr = FIELD_PREP(GICV5_ITS_DT_CFGR_STRUCTURE,
			  GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL) |
	       FIELD_PREP(GICV5_ITS_DT_CFGR_L2SZ, devtab_l2sz) |
	       FIELD_PREP(GICV5_ITS_DT_CFGR_DEVICEID_BITS, device_id_bits);
	its_writel_relaxed(its, cfgr, GICV5_ITS_DT_CFGR);

	baser = virt_to_phys(l1devtab) & GICV5_ITS_DT_BASER_ADDR_MASK;
	its_writeq_relaxed(its, baser, GICV5_ITS_DT_BASER);

	its->devtab_cfgr.cfgr = cfgr;
	its->devtab_cfgr.l2.l1devtab = l1devtab;
	its->devtab_cfgr.l2.l2ptrs = l2ptrs;

	return 0;
}

/*
 * Initialise the device table as either 1- or 2-level depending on what is
 * supported by the hardware.
 */
static int gicv5_its_init_devtab(struct gicv5_its_chip_data *its)
{
	u8 device_id_bits, devtab_l2sz;
	bool two_level_devtab;
	u32 idr1;

	idr1 = its_readl_relaxed(its, GICV5_ITS_IDR1);

	device_id_bits = FIELD_GET(GICV5_ITS_IDR1_DEVICEID_BITS, idr1);
	two_level_devtab = gicv5_its_l2sz_two_level(true, idr1, device_id_bits,
						    &devtab_l2sz);
	if (two_level_devtab)
		return gicv5_its_alloc_devtab_two_level(its, device_id_bits,
							devtab_l2sz);
	else
		return gicv5_its_alloc_devtab_linear(its, device_id_bits);
}

static void gicv5_its_deinit_devtab(struct gicv5_its_chip_data *its)
{
	u8 str = devtab_cfgr_field(its, STRUCTURE);

	if (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) {
		kfree(its->devtab_cfgr.linear.devtab);
	} else {
		kfree(its->devtab_cfgr.l2.l1devtab);
		kfree(its->devtab_cfgr.l2.l2ptrs);
	}
}

static void gicv5_its_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d);
	u64 addr = its_dev->its_trans_phys_base;

	msg->data = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq);
	msi_msg_set_addr(irq_data_get_msi_desc(d), msg, addr);
}

static const struct irq_chip gicv5_its_irq_chip = {
	.name			= "GICv5-ITS-MSI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_get_irqchip_state	= irq_chip_get_parent_state,
	.irq_set_irqchip_state	= irq_chip_set_parent_state,
	.irq_compose_msi_msg	= gicv5_its_compose_msi_msg,
};

static struct gicv5_its_dev *gicv5_its_find_device(struct gicv5_its_chip_data *its,
						   u32 device_id)
{
	struct gicv5_its_dev *dev = xa_load(&its->its_devices, device_id);

	return dev ? dev : ERR_PTR(-ENODEV);
}

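/*
 * Allocate the per-device ITS state: create and register the ITT, set up the
 * EventID allocation bitmap and track the device in the its_devices xarray.
 */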
static struct gicv5_its_dev *gicv5_its_alloc_device(struct gicv5_its_chip_data *its, int nvec,
						    u32 dev_id)
{
	struct gicv5_its_dev *its_dev;
	void *entry;
	int ret;

	its_dev = gicv5_its_find_device(its, dev_id);
	if (!IS_ERR(its_dev)) {
		pr_err("A device with this DeviceID (0x%x) has already been registered.\n",
		       dev_id);

		return ERR_PTR(-EBUSY);
	}

	its_dev = kzalloc(sizeof(*its_dev), GFP_KERNEL);
	if (!its_dev)
		return ERR_PTR(-ENOMEM);

	its_dev->device_id = dev_id;
	its_dev->num_events = nvec;

	ret = gicv5_its_device_register(its, its_dev);
	if (ret) {
		pr_err("Failed to register the device\n");
		goto out_dev_free;
	}

	gicv5_its_device_cache_inv(its, its_dev);

	its_dev->its_node = its;

	its_dev->event_map = (unsigned long *)bitmap_zalloc(its_dev->num_events, GFP_KERNEL);
	if (!its_dev->event_map) {
		ret = -ENOMEM;
		goto out_unregister;
	}

	entry = xa_store(&its->its_devices, dev_id, its_dev, GFP_KERNEL);
	if (xa_is_err(entry)) {
		ret = xa_err(entry);
		goto out_bitmap_free;
	}

	return its_dev;

out_bitmap_free:
	bitmap_free(its_dev->event_map);
out_unregister:
	gicv5_its_device_unregister(its, its_dev);
out_dev_free:
	kfree(its_dev);
	return ERR_PTR(ret);
}

static int gicv5_its_msi_prepare(struct irq_domain *domain, struct device *dev,
				 int nvec, msi_alloc_info_t *info)
{
	u32 dev_id = info->scratchpad[0].ul;
	struct msi_domain_info *msi_info;
	struct gicv5_its_chip_data *its;
	struct gicv5_its_dev *its_dev;

	msi_info = msi_get_domain_info(domain);
	its = msi_info->data;

	guard(mutex)(&its->dev_alloc_lock);

	its_dev = gicv5_its_alloc_device(its, nvec, dev_id);
	if (IS_ERR(its_dev))
		return PTR_ERR(its_dev);

	its_dev->its_trans_phys_base = info->scratchpad[1].ul;
	info->scratchpad[0].ptr = its_dev;

	return 0;
}

static void gicv5_its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info)
{
	struct gicv5_its_dev *its_dev = info->scratchpad[0].ptr;
	struct msi_domain_info *msi_info;
	struct gicv5_its_chip_data *its;

	msi_info = msi_get_domain_info(domain);
	its = msi_info->data;

	guard(mutex)(&its->dev_alloc_lock);

	if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map, its_dev->num_events)))
		return;

	xa_erase(&its->its_devices, its_dev->device_id);
	bitmap_free(its_dev->event_map);
	gicv5_its_device_unregister(its, its_dev);
	kfree(its_dev);
}

static struct msi_domain_ops gicv5_its_msi_domain_ops = {
	.msi_prepare	= gicv5_its_msi_prepare,
	.msi_teardown	= gicv5_its_msi_teardown,
};

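/*
 * Write the ITT entry mapping an EventID to an LPI and invalidate any stale
 * cached translation for it.
 */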
static int gicv5_its_map_event(struct gicv5_its_dev *its_dev, u16 event_id, u32 lpi)
{
	struct gicv5_its_chip_data *its = its_dev->its_node;
	u64 itt_entry;
	__le64 *itte;

	itte = gicv5_its_device_get_itte_ref(its_dev, event_id);

	if (FIELD_GET(GICV5_ITTL2E_VALID, le64_to_cpu(*itte)))
		return -EEXIST;

	itt_entry = FIELD_PREP(GICV5_ITTL2E_LPI_ID, lpi) |
		    FIELD_PREP(GICV5_ITTL2E_VALID, 0x1);

	its_write_table_entry(its, itte, itt_entry);

	gicv5_its_itt_cache_inv(its, its_dev->device_id, event_id);

	return 0;
}

static void gicv5_its_unmap_event(struct gicv5_its_dev *its_dev, u16 event_id)
{
	struct gicv5_its_chip_data *its = its_dev->its_node;
	u64 itte_val;
	__le64 *itte;

	itte = gicv5_its_device_get_itte_ref(its_dev, event_id);

	itte_val = le64_to_cpu(*itte);
	itte_val &= ~GICV5_ITTL2E_VALID;

	its_write_table_entry(its, itte, itte_val);

	gicv5_its_itt_cache_inv(its, its_dev->device_id, event_id);
}

static int gicv5_its_alloc_eventid(struct gicv5_its_dev *its_dev, msi_alloc_info_t *info,
				   unsigned int nr_irqs, u32 *eventid)
{
	int event_id_base;

	if (!(info->flags & MSI_ALLOC_FLAGS_FIXED_MSG_DATA)) {
		event_id_base = bitmap_find_free_region(its_dev->event_map,
							its_dev->num_events,
							get_count_order(nr_irqs));
		if (event_id_base < 0)
			return event_id_base;
	} else {
		/*
		 * We want to have a fixed EventID mapped for hardcoded
		 * message data allocations.
		 */
		if (WARN_ON_ONCE(nr_irqs != 1))
			return -EINVAL;

		event_id_base = info->hwirq;

		if (event_id_base >= its_dev->num_events) {
			pr_err("EventID outside of ITT range; cannot allocate an ITT entry!\n");

			return -EINVAL;
		}

		if (test_and_set_bit(event_id_base, its_dev->event_map)) {
			pr_warn("Can't reserve event_id bitmap\n");
			return -EINVAL;
		}
	}

	*eventid = event_id_base;

	return 0;
}

static void gicv5_its_free_eventid(struct gicv5_its_dev *its_dev, u32 event_id_base,
				   unsigned int nr_irqs)
{
	bitmap_release_region(its_dev->event_map, event_id_base,
			      get_count_order(nr_irqs));
}

static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				      unsigned int nr_irqs, void *arg)
{
	u32 device_id, event_id_base, lpi;
	struct gicv5_its_dev *its_dev;
	msi_alloc_info_t *info = arg;
	irq_hw_number_t hwirq;
	struct irq_data *irqd;
	int ret, i;

	its_dev = info->scratchpad[0].ptr;

	ret = gicv5_its_alloc_eventid(its_dev, info, nr_irqs, &event_id_base);
	if (ret)
		return ret;

	ret = iommu_dma_prepare_msi(info->desc, its_dev->its_trans_phys_base);
	if (ret)
		goto out_eventid;

	device_id = its_dev->device_id;

	for (i = 0; i < nr_irqs; i++) {
		/* gicv5_alloc_lpi() returns a negative value on failure. */
		ret = gicv5_alloc_lpi();
		if (ret < 0) {
			pr_debug("Failed to find free LPI!\n");
			goto out_eventid;
		}
		lpi = ret;

		ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1, &lpi);
		if (ret)
			goto out_free_lpi;

		/*
		 * Store eventid and deviceid into the hwirq for later use.
		 *
		 *	hwirq = event_id << 32 | device_id
		 */
		hwirq = FIELD_PREP(GICV5_ITS_HWIRQ_DEVICE_ID, device_id) |
			FIELD_PREP(GICV5_ITS_HWIRQ_EVENT_ID, (u64)event_id_base + i);
		irq_domain_set_info(domain, virq + i, hwirq,
				    &gicv5_its_irq_chip, its_dev,
				    handle_fasteoi_irq, NULL, NULL);

		irqd = irq_get_irq_data(virq + i);
		irqd_set_single_target(irqd);
		irqd_set_affinity_on_activate(irqd);
	}

	return 0;

out_free_lpi:
	gicv5_free_lpi(lpi);
out_eventid:
	gicv5_its_free_eventid(its_dev, event_id_base, nr_irqs);
	return ret;
}

static void gicv5_its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				      unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct gicv5_its_chip_data *its;
	struct gicv5_its_dev *its_dev;
	u16 event_id_base;
	unsigned int i;

	its_dev = irq_data_get_irq_chip_data(d);
	its = its_dev->its_node;

	event_id_base = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq);

	bitmap_release_region(its_dev->event_map, event_id_base,
			      get_count_order(nr_irqs));

	/* Hierarchically free irq data */
	for (i = 0; i < nr_irqs; i++) {
		d = irq_domain_get_irq_data(domain, virq + i);

		gicv5_free_lpi(d->parent_data->hwirq);
		irq_domain_reset_irq_data(d);
		irq_domain_free_irqs_parent(domain, virq + i, 1);
	}

	gicv5_its_syncr(its, its_dev);
	gicv5_irs_syncr();
}

static int gicv5_its_irq_domain_activate(struct irq_domain *domain, struct irq_data *d,
					 bool reserve)
{
	struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d);
	u16 event_id;
	u32 lpi;

	event_id = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq);
	lpi = d->parent_data->hwirq;

	return gicv5_its_map_event(its_dev, event_id, lpi);
}

static void gicv5_its_irq_domain_deactivate(struct irq_domain *domain,
					    struct irq_data *d)
{
	struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d);
	u16 event_id;

	event_id = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq);

	gicv5_its_unmap_event(its_dev, event_id);
}

static const struct irq_domain_ops gicv5_its_irq_domain_ops = {
	.alloc		= gicv5_its_irq_domain_alloc,
	.free		= gicv5_its_irq_domain_free,
	.activate	= gicv5_its_irq_domain_activate,
	.deactivate	= gicv5_its_irq_domain_deactivate,
	.select		= msi_lib_irq_domain_select,
};

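/* Toggle CR0.ITSEN and wait for the write to take effect (CR0 reports IDLE). */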
static int gicv5_its_write_cr0(struct gicv5_its_chip_data *its, bool enable)
{
	u32 cr0 = FIELD_PREP(GICV5_ITS_CR0_ITSEN, enable);

	its_writel_relaxed(its, cr0, GICV5_ITS_CR0);
	return gicv5_wait_for_op_atomic(its->its_base, GICV5_ITS_CR0,
					GICV5_ITS_CR0_IDLE, NULL);
}

static int gicv5_its_enable(struct gicv5_its_chip_data *its)
{
	return gicv5_its_write_cr0(its, true);
}

static int gicv5_its_disable(struct gicv5_its_chip_data *its)
{
	return gicv5_its_write_cr0(its, false);
}

static void gicv5_its_print_info(struct gicv5_its_chip_data *its_node)
{
	bool devtab_linear;
	u8 device_id_bits;
	u8 str;

	device_id_bits = devtab_cfgr_field(its_node, DEVICEID_BITS);

	str = devtab_cfgr_field(its_node, STRUCTURE);
	devtab_linear = (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR);

	pr_info("ITS %s enabled using %s device table device_id_bits %u\n",
		fwnode_get_name(its_node->fwnode),
		devtab_linear ? "linear" : "2-level",
		device_id_bits);
}

static int gicv5_its_init_domain(struct gicv5_its_chip_data *its, struct irq_domain *parent)
{
	struct irq_domain_info dom_info = {
		.fwnode		= its->fwnode,
		.ops		= &gicv5_its_irq_domain_ops,
		.domain_flags	= its->msi_domain_flags,
		.parent		= parent,
	};
	struct msi_domain_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->ops = &gicv5_its_msi_domain_ops;
	info->data = its;
	dom_info.host_data = info;

	if (!msi_create_parent_irq_domain(&dom_info, &gic_v5_its_msi_parent_ops)) {
		kfree(info);
		return -ENOMEM;
	}

	return 0;
}

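/*
 * Initialise a single ITS instance: program the memory attributes (CR1),
 * allocate the device table, enable the ITS and create its MSI parent
 * irqdomain.
 */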
static int __init gicv5_its_init_bases(void __iomem *its_base, struct fwnode_handle *handle,
				       struct irq_domain *parent_domain)
{
	struct device_node *np = to_of_node(handle);
	struct gicv5_its_chip_data *its_node;
	u32 cr0, cr1;
	bool enabled;
	int ret;

	its_node = kzalloc(sizeof(*its_node), GFP_KERNEL);
	if (!its_node)
		return -ENOMEM;

	mutex_init(&its_node->dev_alloc_lock);
	xa_init(&its_node->its_devices);
	its_node->fwnode = handle;
	its_node->its_base = its_base;
	its_node->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI |
				     IRQ_DOMAIN_FLAG_FWNODE_PARENT;

	cr0 = its_readl_relaxed(its_node, GICV5_ITS_CR0);
	enabled = FIELD_GET(GICV5_ITS_CR0_ITSEN, cr0);
	if (WARN(enabled, "ITS %s enabled, disabling it before proceeding\n", np->full_name)) {
		ret = gicv5_its_disable(its_node);
		if (ret)
			goto out_free_node;
	}

	if (of_property_read_bool(np, "dma-noncoherent")) {
		/*
		 * A non-coherent ITS implies that some cache levels cannot be
		 * used coherently by the cores and GIC. Our only option is to mark
		 * memory attributes for the GIC as non-cacheable; by default,
		 * non-cacheable memory attributes imply outer-shareable
		 * shareability, so the value written into ITS_CR1_SH is ignored.
		 */
		cr1 = FIELD_PREP(GICV5_ITS_CR1_ITT_RA, GICV5_NO_READ_ALLOC) |
		      FIELD_PREP(GICV5_ITS_CR1_DT_RA, GICV5_NO_READ_ALLOC) |
		      FIELD_PREP(GICV5_ITS_CR1_IC, GICV5_NON_CACHE) |
		      FIELD_PREP(GICV5_ITS_CR1_OC, GICV5_NON_CACHE);
		its_node->flags |= ITS_FLAGS_NON_COHERENT;
	} else {
		cr1 = FIELD_PREP(GICV5_ITS_CR1_ITT_RA, GICV5_READ_ALLOC) |
		      FIELD_PREP(GICV5_ITS_CR1_DT_RA, GICV5_READ_ALLOC) |
		      FIELD_PREP(GICV5_ITS_CR1_IC, GICV5_WB_CACHE) |
		      FIELD_PREP(GICV5_ITS_CR1_OC, GICV5_WB_CACHE) |
		      FIELD_PREP(GICV5_ITS_CR1_SH, GICV5_INNER_SHARE);
	}

	its_writel_relaxed(its_node, cr1, GICV5_ITS_CR1);

	ret = gicv5_its_init_devtab(its_node);
	if (ret)
		goto out_free_node;

	ret = gicv5_its_enable(its_node);
	if (ret)
		goto out_free_devtab;

	ret = gicv5_its_init_domain(its_node, parent_domain);
	if (ret)
		goto out_disable_its;

	gicv5_its_print_info(its_node);

	return 0;

out_disable_its:
	gicv5_its_disable(its_node);
out_free_devtab:
	gicv5_its_deinit_devtab(its_node);
out_free_node:
	kfree(its_node);
	return ret;
}

static int __init gicv5_its_init(struct device_node *node)
{
	void __iomem *its_base;
	int ret, idx;

	idx = of_property_match_string(node, "reg-names", "ns-config");
	if (idx < 0) {
		pr_err("%pOF: ns-config reg-name not present\n", node);
		return -ENODEV;
	}

	its_base = of_io_request_and_map(node, idx, of_node_full_name(node));
	if (IS_ERR(its_base)) {
		pr_err("%pOF: unable to map GICv5 ITS_CONFIG_FRAME\n", node);
		return PTR_ERR(its_base);
	}

	ret = gicv5_its_init_bases(its_base, of_fwnode_handle(node),
				   gicv5_global_data.lpi_domain);
	if (ret)
		goto out_unmap;

	return 0;

out_unmap:
	iounmap(its_base);
	return ret;
}

void __init gicv5_its_of_probe(struct device_node *parent)
{
	struct device_node *np;

	for_each_available_child_of_node(parent, np) {
		if (!of_device_is_compatible(np, "arm,gic-v5-its"))
			continue;

		if (gicv5_its_init(np))
			pr_err("Failed to init ITS %s\n", np->full_name);
	}
}