1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved.
4 */
5
6 #define pr_fmt(fmt) "GICv5 ITS: " fmt
7
8 #include <linux/acpi.h>
9 #include <linux/acpi_iort.h>
10 #include <linux/bitmap.h>
11 #include <linux/iommu.h>
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/msi.h>
15 #include <linux/of.h>
16 #include <linux/of_address.h>
17 #include <linux/of_irq.h>
18 #include <linux/slab.h>
19
20 #include <linux/irqchip.h>
21 #include <linux/irqchip/arm-gic-v5.h>
22 #include <linux/irqchip/irq-msi-lib.h>
23
24 #include "irq-gic-its-msi-parent.h"
25
26 #define ITS_FLAGS_NON_COHERENT BIT(0)
27
/*
 * Per-ITS-instance state.
 *
 * @its_devices:	DeviceID -> struct gicv5_its_dev lookup (see
 *			gicv5_its_find_device()/gicv5_its_alloc_device()).
 * @dev_alloc_lock:	serialises device allocation and teardown
 *			(taken in msi_prepare/msi_teardown).
 * @fwnode:		firmware node this ITS was probed from.
 * @devtab_cfgr:	cached device table configuration (mirror of the
 *			DT_CFGR register plus the table pointers).
 * @its_base:		MMIO base of the ITS control frame.
 * @flags:		ITS_FLAGS_* (e.g. ITS_FLAGS_NON_COHERENT).
 * @msi_domain_flags:	flags handed to the MSI parent domain.
 */
struct gicv5_its_chip_data {
	struct xarray its_devices;
	struct mutex dev_alloc_lock;
	struct fwnode_handle *fwnode;
	struct gicv5_its_devtab_cfg devtab_cfgr;
	void __iomem *its_base;
	u32 flags;
	unsigned int msi_domain_flags;
};
37
/*
 * Per-device (DeviceID) state.
 *
 * @its_node:		owning ITS instance.
 * @itt_cfg:		interrupt translation table layout for this device.
 * @event_map:		bitmap of allocated EventIDs (num_events bits).
 * @device_id:		DeviceID used to index the device table.
 * @num_events:		number of events (MSI vectors) reserved for the device.
 * @its_trans_phys_base: physical address of the ITS translate frame the
 *			device writes MSIs to.
 */
struct gicv5_its_dev {
	struct gicv5_its_chip_data *its_node;
	struct gicv5_its_itt_cfg itt_cfg;
	unsigned long *event_map;
	u32 device_id;
	u32 num_events;
	phys_addr_t its_trans_phys_base;
};
46
its_readl_relaxed(struct gicv5_its_chip_data * its_node,const u32 reg_offset)47 static u32 its_readl_relaxed(struct gicv5_its_chip_data *its_node, const u32 reg_offset)
48 {
49 return readl_relaxed(its_node->its_base + reg_offset);
50 }
51
its_writel_relaxed(struct gicv5_its_chip_data * its_node,const u32 val,const u32 reg_offset)52 static void its_writel_relaxed(struct gicv5_its_chip_data *its_node, const u32 val,
53 const u32 reg_offset)
54 {
55 writel_relaxed(val, its_node->its_base + reg_offset);
56 }
57
its_writeq_relaxed(struct gicv5_its_chip_data * its_node,const u64 val,const u32 reg_offset)58 static void its_writeq_relaxed(struct gicv5_its_chip_data *its_node, const u64 val,
59 const u32 reg_offset)
60 {
61 writeq_relaxed(val, its_node->its_base + reg_offset);
62 }
63
/*
 * Make in-memory table data written by the CPU observable by the ITS.
 *
 * Non-coherent implementations need the lines cleaned/invalidated to the
 * point of coherency; coherent ones only need an inner-shareable store
 * barrier to order table writes before the MMIO that publishes them.
 */
static void gicv5_its_dcache_clean(struct gicv5_its_chip_data *its, void *start,
				   size_t sz)
{
	unsigned long base = (unsigned long)start;

	if (!(its->flags & ITS_FLAGS_NON_COHERENT)) {
		dsb(ishst);
		return;
	}

	dcache_clean_inval_poc(base, base + sz);
}
74
/*
 * Publish one 64-bit table entry (DTE/ITTE/L1 descriptor) to memory read by
 * the ITS: single-copy-atomic store via WRITE_ONCE(), then cache clean or
 * barrier so the ITS observes it.
 */
static void its_write_table_entry(struct gicv5_its_chip_data *its, __le64 *entry,
				  u64 val)
{
	WRITE_ONCE(*entry, cpu_to_le64(val));
	gicv5_its_dcache_clean(its, entry, sizeof(*entry));
}
81
82 #define devtab_cfgr_field(its, f) \
83 FIELD_GET(GICV5_ITS_DT_CFGR_##f, (its)->devtab_cfgr.cfgr)
84
/*
 * Poll STATUSR.IDLE until an issued invalidation has completed.
 * Atomic-context-safe variant (no sleeping wait).
 */
static int gicv5_its_cache_sync(struct gicv5_its_chip_data *its)
{
	return gicv5_wait_for_op_atomic(its->its_base, GICV5_ITS_STATUSR,
					GICV5_ITS_STATUSR_IDLE, NULL);
}
90
/*
 * Issue a SYNC for a given DeviceID and wait for the ITS to report the
 * operation complete (SYNC_STATUSR.IDLE).
 */
static void gicv5_its_syncr(struct gicv5_its_chip_data *its,
			    struct gicv5_its_dev *its_dev)
{
	u64 syncr;

	syncr = FIELD_PREP(GICV5_ITS_SYNCR_SYNC, 1) |
		FIELD_PREP(GICV5_ITS_SYNCR_DEVICEID, its_dev->device_id);

	its_writeq_relaxed(its, syncr, GICV5_ITS_SYNCR);

	gicv5_wait_for_op(its->its_base, GICV5_ITS_SYNC_STATUSR, GICV5_ITS_SYNC_STATUSR_IDLE);
}
103
104 /* Number of bits required for each L2 {device/interrupt translation} table size */
105 #define ITS_L2SZ_64K_L2_BITS 13
106 #define ITS_L2SZ_16K_L2_BITS 11
107 #define ITS_L2SZ_4K_L2_BITS 9
108
gicv5_its_l2sz_to_l2_bits(unsigned int sz)109 static unsigned int gicv5_its_l2sz_to_l2_bits(unsigned int sz)
110 {
111 switch (sz) {
112 case GICV5_ITS_DT_ITT_CFGR_L2SZ_64k:
113 return ITS_L2SZ_64K_L2_BITS;
114 case GICV5_ITS_DT_ITT_CFGR_L2SZ_16k:
115 return ITS_L2SZ_16K_L2_BITS;
116 case GICV5_ITS_DT_ITT_CFGR_L2SZ_4k:
117 default:
118 return ITS_L2SZ_4K_L2_BITS;
119 }
120 }
121
/*
 * Invalidate the ITS's cached translation for one (DeviceID, EventID) pair.
 *
 * DIDR/EIDR select the entry, INV_EVENTR.I triggers the invalidation; the
 * register writes must precede the trigger, then we wait for completion.
 */
static int gicv5_its_itt_cache_inv(struct gicv5_its_chip_data *its, u32 device_id,
				   u16 event_id)
{
	u32 eventr, eidr;
	u64 didr;

	didr = FIELD_PREP(GICV5_ITS_DIDR_DEVICEID, device_id);
	eidr = FIELD_PREP(GICV5_ITS_EIDR_EVENTID, event_id);
	eventr = FIELD_PREP(GICV5_ITS_INV_EVENTR_I, 0x1);

	its_writeq_relaxed(its, didr, GICV5_ITS_DIDR);
	its_writel_relaxed(its, eidr, GICV5_ITS_EIDR);
	its_writel_relaxed(its, eventr, GICV5_ITS_INV_EVENTR);

	return gicv5_its_cache_sync(its);
}
138
/* A linear ITT is a single kcalloc()'ed array; free it in one go. */
static void gicv5_its_free_itt_linear(struct gicv5_its_dev *its_dev)
{
	kfree(its_dev->itt_cfg.linear.itt);
}
143
gicv5_its_free_itt_two_level(struct gicv5_its_dev * its_dev)144 static void gicv5_its_free_itt_two_level(struct gicv5_its_dev *its_dev)
145 {
146 unsigned int i, num_ents = its_dev->itt_cfg.l2.num_l1_ents;
147
148 for (i = 0; i < num_ents; i++)
149 kfree(its_dev->itt_cfg.l2.l2ptrs[i]);
150
151 kfree(its_dev->itt_cfg.l2.l2ptrs);
152 kfree(its_dev->itt_cfg.l2.l1itt);
153 }
154
gicv5_its_free_itt(struct gicv5_its_dev * its_dev)155 static void gicv5_its_free_itt(struct gicv5_its_dev *its_dev)
156 {
157 if (!its_dev->itt_cfg.l2itt)
158 gicv5_its_free_itt_linear(its_dev);
159 else
160 gicv5_its_free_itt_two_level(its_dev);
161 }
162
/*
 * Allocate a single-level (linear) ITT with one entry per possible EventID
 * (2^event_id_bits) and record its layout in @its_dev->itt_cfg.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int gicv5_its_create_itt_linear(struct gicv5_its_chip_data *its,
				       struct gicv5_its_dev *its_dev,
				       unsigned int event_id_bits)
{
	unsigned int num_ents = BIT(event_id_bits);
	__le64 *itt;

	itt = kcalloc(num_ents, sizeof(*itt), GFP_KERNEL);
	if (!itt)
		return -ENOMEM;

	its_dev->itt_cfg.linear.itt = itt;
	its_dev->itt_cfg.linear.num_ents = num_ents;
	its_dev->itt_cfg.l2itt = false;
	its_dev->itt_cfg.event_id_bits = event_id_bits;

	/* Zeroed (invalid) entries must be visible to the ITS before use. */
	gicv5_its_dcache_clean(its, itt, num_ents * sizeof(*itt));

	return 0;
}
183
/*
 * Allocate a two-level ITT. All ITT entries are allocated in one go, unlike
 * with the device table. Span may be used to limit the second level table
 * size, where possible.
 */
static int gicv5_its_create_itt_two_level(struct gicv5_its_chip_data *its,
					  struct gicv5_its_dev *its_dev,
					  unsigned int event_id_bits,
					  unsigned int itt_l2sz,
					  unsigned int num_events)
{
	unsigned int l1_bits, l2_bits, span, events_per_l2_table;
	unsigned int complete_tables, final_span, num_ents;
	__le64 *itt_l1, *itt_l2, **l2ptrs;
	int i, ret;
	u64 val;

	/* A 2-level split only makes sense if L1 gets at least one bit. */
	ret = gicv5_its_l2sz_to_l2_bits(itt_l2sz);
	if (ret >= event_id_bits) {
		pr_debug("Incorrect l2sz (0x%x) for %u EventID bits. Cannot allocate ITT\n",
			 itt_l2sz, event_id_bits);
		return -EINVAL;
	}

	l2_bits = ret;

	l1_bits = event_id_bits - l2_bits;

	/* One L1 entry per L2 table. */
	num_ents = BIT(l1_bits);

	itt_l1 = kcalloc(num_ents, sizeof(*itt_l1), GFP_KERNEL);
	if (!itt_l1)
		return -ENOMEM;

	l2ptrs = kcalloc(num_ents, sizeof(*l2ptrs), GFP_KERNEL);
	if (!l2ptrs) {
		kfree(itt_l1);
		return -ENOMEM;
	}

	its_dev->itt_cfg.l2.l2ptrs = l2ptrs;

	its_dev->itt_cfg.l2.l2sz = itt_l2sz;
	its_dev->itt_cfg.l2.l1itt = itt_l1;
	its_dev->itt_cfg.l2.num_l1_ents = num_ents;
	its_dev->itt_cfg.l2itt = true;
	its_dev->itt_cfg.event_id_bits = event_id_bits;

	/*
	 * Need to determine how many entries there are per L2 - this is based
	 * on the number of bits in the table.
	 */
	events_per_l2_table = BIT(l2_bits);
	complete_tables = num_events / events_per_l2_table;
	/*
	 * The last (partial) L2 table can be shrunk via the SPAN field to
	 * just cover the remaining events.
	 */
	final_span = order_base_2(num_events % events_per_l2_table);

	for (i = 0; i < num_ents; i++) {
		size_t l2sz;

		/* Full span for complete tables, reduced span for the tail. */
		span = i == complete_tables ? final_span : l2_bits;

		itt_l2 = kcalloc(BIT(span), sizeof(*itt_l2), GFP_KERNEL);
		if (!itt_l2) {
			ret = -ENOMEM;
			goto out_free;
		}

		its_dev->itt_cfg.l2.l2ptrs[i] = itt_l2;

		l2sz = BIT(span) * sizeof(*itt_l2);

		/* Clean the L2 table before the L1 entry points at it. */
		gicv5_its_dcache_clean(its, itt_l2, l2sz);

		val = (virt_to_phys(itt_l2) & GICV5_ITTL1E_L2_ADDR_MASK) |
		      FIELD_PREP(GICV5_ITTL1E_SPAN, span)		   |
		      FIELD_PREP(GICV5_ITTL1E_VALID, 0x1);

		WRITE_ONCE(itt_l1[i], cpu_to_le64(val));
	}

	/* Publish the fully-populated L1 table in one clean. */
	gicv5_its_dcache_clean(its, itt_l1, num_ents * sizeof(*itt_l1));

	return 0;

out_free:
	/* Undo only the L2 tables allocated so far (i failed, free i-1..0). */
	for (i = i - 1; i >= 0; i--)
		kfree(its_dev->itt_cfg.l2.l2ptrs[i]);

	kfree(its_dev->itt_cfg.l2.l2ptrs);
	kfree(itt_l1);
	return ret;
}
276
/*
 * Function to check whether the device table or ITT table support
 * a two-level table and if so depending on the number of id_bits
 * requested, determine whether a two-level table is required.
 *
 * Return the 2-level size value if a two level table is deemed
 * necessary.
 */
static bool gicv5_its_l2sz_two_level(bool devtab, u32 its_idr1, u8 id_bits, u8 *sz)
{
	unsigned int l2_bits, l2_sz;

	/* Hardware must advertise 2-level support for the relevant table. */
	if (devtab && !FIELD_GET(GICV5_ITS_IDR1_DT_LEVELS, its_idr1))
		return false;

	if (!devtab && !FIELD_GET(GICV5_ITS_IDR1_ITT_LEVELS, its_idr1))
		return false;

	/*
	 * Pick an L2 size that matches the pagesize; if a match
	 * is not found, go for the smallest supported l2 size granule.
	 *
	 * This ensures that we will always be able to allocate
	 * contiguous memory at L2.
	 */
	switch (PAGE_SIZE) {
	case SZ_64K:
		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(its_idr1)) {
			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_64k;
			break;
		}
		/* 64K unsupported: try the smaller granules below. */
		fallthrough;
	case SZ_4K:
		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(its_idr1)) {
			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k;
			break;
		}
		fallthrough;
	case SZ_16K:
		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_16KB(its_idr1)) {
			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_16k;
			break;
		}
		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(its_idr1)) {
			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k;
			break;
		}
		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(its_idr1)) {
			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_64k;
			break;
		}

		/* Nothing advertised: default to the 4K encoding. */
		l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k;
		break;
	}

	l2_bits = gicv5_its_l2sz_to_l2_bits(l2_sz);

	/* id space fits in a single L2-sized table: 1-level is enough. */
	if (l2_bits > id_bits)
		return false;

	*sz = l2_sz;

	return true;
}
342
/*
 * Return a pointer to the ITT entry for @event_id.
 *
 * Linear ITT: direct index. Two-level ITT: upper EventID bits index the L1
 * table, lower bits index the selected L2 table. Assumes the L2 table for
 * the event exists (they are all allocated up front at register time).
 */
static __le64 *gicv5_its_device_get_itte_ref(struct gicv5_its_dev *its_dev,
					     u16 event_id)
{
	unsigned int l1_idx, l2_idx, l2_bits;
	__le64 *l2_itt;

	if (!its_dev->itt_cfg.l2itt) {
		__le64 *itt = its_dev->itt_cfg.linear.itt;

		return &itt[event_id];
	}

	l2_bits = gicv5_its_l2sz_to_l2_bits(its_dev->itt_cfg.l2.l2sz);
	l1_idx = event_id >> l2_bits;
	l2_idx = event_id & GENMASK(l2_bits - 1, 0);
	l2_itt = its_dev->itt_cfg.l2.l2ptrs[l1_idx];

	return &l2_itt[l2_idx];
}
362
/*
 * Invalidate any ITS-cached state for a device's DTE (all of its events,
 * INV_DEVICER.L1 = 0), then wait for completion.
 */
static int gicv5_its_device_cache_inv(struct gicv5_its_chip_data *its,
				      struct gicv5_its_dev *its_dev)
{
	u32 devicer;
	u64 didr;

	didr = FIELD_PREP(GICV5_ITS_DIDR_DEVICEID, its_dev->device_id);
	devicer = FIELD_PREP(GICV5_ITS_INV_DEVICER_I, 0x1)	|
		  FIELD_PREP(GICV5_ITS_INV_DEVICER_EVENTID_BITS,
			     its_dev->itt_cfg.event_id_bits)	|
		  FIELD_PREP(GICV5_ITS_INV_DEVICER_L1, 0x0);
	its_writeq_relaxed(its, didr, GICV5_ITS_DIDR);
	its_writel_relaxed(its, devicer, GICV5_ITS_INV_DEVICER);

	return gicv5_its_cache_sync(its);
}
379
/*
 * Allocate a level 2 device table entry, update L1 parent to reference it.
 * Only used for 2-level device tables, and it is called on demand.
 */
static int gicv5_its_alloc_l2_devtab(struct gicv5_its_chip_data *its,
				     unsigned int l1_index)
{
	__le64 *l2devtab, *l1devtab = its->devtab_cfgr.l2.l1devtab;
	u8 span, l2sz, l2_bits;
	u64 l1dte;

	/* Nothing to do if an L2 table is already linked at this L1 slot. */
	if (FIELD_GET(GICV5_DTL1E_VALID, le64_to_cpu(l1devtab[l1_index])))
		return 0;

	/* SPAN was pre-seeded at devtab init; trust it (clamped below). */
	span = FIELD_GET(GICV5_DTL1E_SPAN, le64_to_cpu(l1devtab[l1_index]));
	l2sz = devtab_cfgr_field(its, L2SZ);

	l2_bits = gicv5_its_l2sz_to_l2_bits(l2sz);

	/*
	 * Span allows us to create a smaller L2 device table.
	 * If it is too large, use the number of allowed L2 bits.
	 */
	if (span > l2_bits)
		span = l2_bits;

	l2devtab = kcalloc(BIT(span), sizeof(*l2devtab), GFP_KERNEL);
	if (!l2devtab)
		return -ENOMEM;

	its->devtab_cfgr.l2.l2ptrs[l1_index] = l2devtab;

	l1dte = FIELD_PREP(GICV5_DTL1E_SPAN, span)			|
		(virt_to_phys(l2devtab) & GICV5_DTL1E_L2_ADDR_MASK)	|
		FIELD_PREP(GICV5_DTL1E_VALID, 0x1);
	/* Publish the L1 entry (write + cache maintenance). */
	its_write_table_entry(its, &l1devtab[l1_index], l1dte);

	return 0;
}
419
/*
 * Return a pointer to the device table entry for @device_id, or NULL if an
 * on-demand L2 table allocation fails. With @alloc set, a missing L2 table
 * is allocated first; callers looking up an existing device pass false.
 */
static __le64 *gicv5_its_devtab_get_dte_ref(struct gicv5_its_chip_data *its,
					    u32 device_id, bool alloc)
{
	u8 str = devtab_cfgr_field(its, STRUCTURE);
	unsigned int l2sz, l2_bits, l1_idx, l2_idx;
	__le64 *l2devtab;
	int ret;

	if (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) {
		l2devtab = its->devtab_cfgr.linear.devtab;
		return &l2devtab[device_id];
	}

	/* Two-level: split DeviceID into L1/L2 indices. */
	l2sz = devtab_cfgr_field(its, L2SZ);
	l2_bits = gicv5_its_l2sz_to_l2_bits(l2sz);
	l1_idx = device_id >> l2_bits;
	l2_idx = device_id & GENMASK(l2_bits - 1, 0);

	if (alloc) {
		/*
		 * Allocate a new L2 device table here before
		 * continuing. We make the assumption that the span in
		 * the L1 table has been set correctly, and blindly use
		 * that value.
		 */
		ret = gicv5_its_alloc_l2_devtab(its, l1_idx);
		if (ret)
			return NULL;
	}

	l2devtab = its->devtab_cfgr.l2.l2ptrs[l1_idx];
	return &l2devtab[l2_idx];
}
453
/*
 * Register a new device in the device table. Allocate an ITT and
 * program the L2DTE entry according to the ITT structure that
 * was chosen.
 *
 * Returns 0 on success, -EINVAL for out-of-range IDs, -EBUSY if the DTE is
 * already valid, -ENOMEM on allocation failure, or the cache-inv error.
 */
static int gicv5_its_device_register(struct gicv5_its_chip_data *its,
				     struct gicv5_its_dev *its_dev)
{
	u8 event_id_bits, device_id_bits, itt_struct, itt_l2sz;
	phys_addr_t itt_phys_base;
	bool two_level_itt;
	u32 idr1, idr2;
	__le64 *dte;
	u64 val;
	int ret;

	device_id_bits = devtab_cfgr_field(its, DEVICEID_BITS);

	if (its_dev->device_id >= BIT(device_id_bits)) {
		pr_err("Supplied DeviceID (%u) outside of Device Table range (%u)!",
		       its_dev->device_id, (u32)GENMASK(device_id_bits - 1, 0));
		return -EINVAL;
	}

	/* May allocate an L2 device table on demand (alloc == true). */
	dte = gicv5_its_devtab_get_dte_ref(its, its_dev->device_id, true);
	if (!dte)
		return -ENOMEM;

	if (FIELD_GET(GICV5_DTL2E_VALID, le64_to_cpu(*dte)))
		return -EBUSY;

	/*
	 * Determine how many bits we need, validate those against the max.
	 * Based on these, determine if we should go for a 1- or 2-level ITT.
	 */
	event_id_bits = order_base_2(its_dev->num_events);

	idr2 = its_readl_relaxed(its, GICV5_ITS_IDR2);

	if (event_id_bits > FIELD_GET(GICV5_ITS_IDR2_EVENTID_BITS, idr2)) {
		pr_err("Required EventID bits (%u) larger than supported bits (%u)!",
		       event_id_bits,
		       (u8)FIELD_GET(GICV5_ITS_IDR2_EVENTID_BITS, idr2));
		return -EINVAL;
	}

	idr1 = its_readl_relaxed(its, GICV5_ITS_IDR1);

	/*
	 * L2 ITT size is programmed into the L2DTE regardless of
	 * whether a two-level or linear ITT is built, init it.
	 */
	itt_l2sz = 0;

	two_level_itt = gicv5_its_l2sz_two_level(false, idr1, event_id_bits,
						 &itt_l2sz);
	if (two_level_itt)
		ret = gicv5_its_create_itt_two_level(its, its_dev, event_id_bits,
						     itt_l2sz,
						     its_dev->num_events);
	else
		ret = gicv5_its_create_itt_linear(its, its_dev, event_id_bits);
	if (ret)
		return ret;

	itt_phys_base = two_level_itt ? virt_to_phys(its_dev->itt_cfg.l2.l1itt) :
					virt_to_phys(its_dev->itt_cfg.linear.itt);

	itt_struct = two_level_itt ? GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL :
				     GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR;

	val = FIELD_PREP(GICV5_DTL2E_EVENT_ID_BITS, event_id_bits)	|
	      FIELD_PREP(GICV5_DTL2E_ITT_STRUCTURE, itt_struct)		|
	      (itt_phys_base & GICV5_DTL2E_ITT_ADDR_MASK)		|
	      FIELD_PREP(GICV5_DTL2E_ITT_L2SZ, itt_l2sz)		|
	      FIELD_PREP(GICV5_DTL2E_VALID, 0x1);

	its_write_table_entry(its, dte, val);

	ret = gicv5_its_device_cache_inv(its, its_dev);
	if (ret) {
		/* Roll back: invalidate the DTE and drop the fresh ITT. */
		its_write_table_entry(its, dte, 0);
		gicv5_its_free_itt(its_dev);
		return ret;
	}

	return 0;
}
542
/*
 * Unregister a device in the device table. Lookup the device by ID, free the
 * corresponding ITT, mark the device as invalid in the device table.
 */
static int gicv5_its_device_unregister(struct gicv5_its_chip_data *its,
				       struct gicv5_its_dev *its_dev)
{
	__le64 *dte;

	/* Lookup only - must not trigger an L2 devtab allocation. */
	dte = gicv5_its_devtab_get_dte_ref(its, its_dev->device_id, false);

	if (!FIELD_GET(GICV5_DTL2E_VALID, le64_to_cpu(*dte))) {
		pr_debug("Device table entry for DeviceID 0x%x is not valid. Nothing to clean up!",
			 its_dev->device_id);
		return -EINVAL;
	}

	/* Zero everything - make it clear that this is an invalid entry */
	its_write_table_entry(its, dte, 0);

	gicv5_its_free_itt(its_dev);

	/* Drop any translation the ITS may still have cached. */
	return gicv5_its_device_cache_inv(its, its_dev);
}
567
568 /*
569 * Allocate a 1-level device table. All entries are allocated, but marked
570 * invalid.
571 */
gicv5_its_alloc_devtab_linear(struct gicv5_its_chip_data * its,u8 device_id_bits)572 static int gicv5_its_alloc_devtab_linear(struct gicv5_its_chip_data *its,
573 u8 device_id_bits)
574 {
575 __le64 *devtab;
576 size_t sz;
577 u64 baser;
578 u32 cfgr;
579
580 /*
581 * We expect a GICv5 implementation requiring a large number of
582 * deviceID bits to support a 2-level device table. If that's not
583 * the case, cap the number of deviceIDs supported according to the
584 * kmalloc limits so that the system can chug along with a linear
585 * device table.
586 */
587 sz = BIT_ULL(device_id_bits) * sizeof(*devtab);
588 if (sz > KMALLOC_MAX_SIZE) {
589 u8 device_id_cap = ilog2(KMALLOC_MAX_SIZE/sizeof(*devtab));
590
591 pr_warn("Limiting device ID bits from %u to %u\n",
592 device_id_bits, device_id_cap);
593 device_id_bits = device_id_cap;
594 }
595
596 devtab = kcalloc(BIT(device_id_bits), sizeof(*devtab), GFP_KERNEL);
597 if (!devtab)
598 return -ENOMEM;
599
600 gicv5_its_dcache_clean(its, devtab, sz);
601
602 cfgr = FIELD_PREP(GICV5_ITS_DT_CFGR_STRUCTURE,
603 GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) |
604 FIELD_PREP(GICV5_ITS_DT_CFGR_L2SZ, 0) |
605 FIELD_PREP(GICV5_ITS_DT_CFGR_DEVICEID_BITS, device_id_bits);
606 its_writel_relaxed(its, cfgr, GICV5_ITS_DT_CFGR);
607
608 baser = virt_to_phys(devtab) & GICV5_ITS_DT_BASER_ADDR_MASK;
609 its_writeq_relaxed(its, baser, GICV5_ITS_DT_BASER);
610
611 its->devtab_cfgr.cfgr = cfgr;
612 its->devtab_cfgr.linear.devtab = devtab;
613
614 return 0;
615 }
616
/*
 * Allocate a 2-level device table. L2 entries are not allocated,
 * they are allocated on-demand.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int gicv5_its_alloc_devtab_two_level(struct gicv5_its_chip_data *its,
					    u8 device_id_bits,
					    u8 devtab_l2sz)
{
	unsigned int l1_bits, l2_bits, i;
	__le64 *l1devtab, **l2ptrs;
	size_t l1_sz;
	u64 baser;
	u32 cfgr;

	l2_bits = gicv5_its_l2sz_to_l2_bits(devtab_l2sz);

	l1_bits = device_id_bits - l2_bits;
	l1_sz = BIT(l1_bits) * sizeof(*l1devtab);
	/*
	 * With 2-level device table support it is highly unlikely
	 * that we are not able to allocate the required amount of
	 * device table memory to cover deviceID space; cap the
	 * deviceID space if we encounter such set-up.
	 * If this ever becomes a problem we could revisit the policy
	 * behind level 2 size selection to reduce level-1 deviceID bits.
	 */
	if (l1_sz > KMALLOC_MAX_SIZE) {
		l1_bits = ilog2(KMALLOC_MAX_SIZE/sizeof(*l1devtab));

		pr_warn("Limiting device ID bits from %u to %u\n",
			device_id_bits, l1_bits + l2_bits);
		device_id_bits = l1_bits + l2_bits;
		l1_sz = KMALLOC_MAX_SIZE;
	}

	l1devtab = kcalloc(BIT(l1_bits), sizeof(*l1devtab), GFP_KERNEL);
	if (!l1devtab)
		return -ENOMEM;

	l2ptrs = kcalloc(BIT(l1_bits), sizeof(*l2ptrs), GFP_KERNEL);
	if (!l2ptrs) {
		kfree(l1devtab);
		return -ENOMEM;
	}

	/*
	 * Pre-seed every (invalid) L1 entry with the full span so the
	 * on-demand L2 allocation path can read it back later.
	 */
	for (i = 0; i < BIT(l1_bits); i++)
		l1devtab[i] = cpu_to_le64(FIELD_PREP(GICV5_DTL1E_SPAN, l2_bits));

	gicv5_its_dcache_clean(its, l1devtab, l1_sz);

	cfgr = FIELD_PREP(GICV5_ITS_DT_CFGR_STRUCTURE,
			  GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL)	|
	       FIELD_PREP(GICV5_ITS_DT_CFGR_L2SZ, devtab_l2sz)		|
	       FIELD_PREP(GICV5_ITS_DT_CFGR_DEVICEID_BITS, device_id_bits);
	its_writel_relaxed(its, cfgr, GICV5_ITS_DT_CFGR);

	baser = virt_to_phys(l1devtab) & GICV5_ITS_DT_BASER_ADDR_MASK;
	its_writeq_relaxed(its, baser, GICV5_ITS_DT_BASER);

	its->devtab_cfgr.cfgr = cfgr;
	its->devtab_cfgr.l2.l1devtab = l1devtab;
	its->devtab_cfgr.l2.l2ptrs = l2ptrs;

	return 0;
}
682
683 /*
684 * Initialise the device table as either 1- or 2-level depending on what is
685 * supported by the hardware.
686 */
gicv5_its_init_devtab(struct gicv5_its_chip_data * its)687 static int gicv5_its_init_devtab(struct gicv5_its_chip_data *its)
688 {
689 u8 device_id_bits, devtab_l2sz;
690 bool two_level_devtab;
691 u32 idr1;
692
693 idr1 = its_readl_relaxed(its, GICV5_ITS_IDR1);
694
695 device_id_bits = FIELD_GET(GICV5_ITS_IDR1_DEVICEID_BITS, idr1);
696 two_level_devtab = gicv5_its_l2sz_two_level(true, idr1, device_id_bits,
697 &devtab_l2sz);
698 if (two_level_devtab)
699 return gicv5_its_alloc_devtab_two_level(its, device_id_bits,
700 devtab_l2sz);
701 else
702 return gicv5_its_alloc_devtab_linear(its, device_id_bits);
703 }
704
gicv5_its_deinit_devtab(struct gicv5_its_chip_data * its)705 static void gicv5_its_deinit_devtab(struct gicv5_its_chip_data *its)
706 {
707 u8 str = devtab_cfgr_field(its, STRUCTURE);
708
709 if (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) {
710 kfree(its->devtab_cfgr.linear.devtab);
711 } else {
712 kfree(its->devtab_cfgr.l2.l1devtab);
713 kfree(its->devtab_cfgr.l2.l2ptrs);
714 }
715 }
716
/*
 * Compose the MSI message for an ITS-backed interrupt: the device's ITS
 * translate frame as the address, the EventID (extracted from hwirq) as
 * the payload.
 */
static void gicv5_its_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d);
	u64 addr = its_dev->its_trans_phys_base;

	msg->data = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq);
	msi_msg_set_addr(irq_data_get_msi_desc(d), msg, addr);
}
725
/*
 * ITS MSI irqchip: everything except message composition is delegated to
 * the parent (LPI) domain.
 */
static const struct irq_chip gicv5_its_irq_chip = {
	.name			= "GICv5-ITS-MSI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_get_irqchip_state	= irq_chip_get_parent_state,
	.irq_set_irqchip_state	= irq_chip_set_parent_state,
	.irq_compose_msi_msg	= gicv5_its_compose_msi_msg,
};
736
/* Look up a registered device by DeviceID; ERR_PTR(-ENODEV) if absent. */
static struct gicv5_its_dev *gicv5_its_find_device(struct gicv5_its_chip_data *its,
						   u32 device_id)
{
	struct gicv5_its_dev *dev;

	dev = xa_load(&its->its_devices, device_id);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return dev;
}
744
/*
 * Allocate and register a device with @nvec events: build its ITT, program
 * the DTE, and insert it into the per-ITS xarray.
 *
 * Caller must hold its->dev_alloc_lock (see gicv5_its_msi_prepare()).
 * Returns the new device or an ERR_PTR (-EBUSY if already registered).
 */
static struct gicv5_its_dev *gicv5_its_alloc_device(struct gicv5_its_chip_data *its, int nvec,
						    u32 dev_id)
{
	struct gicv5_its_dev *its_dev;
	void *entry;
	int ret;

	its_dev = gicv5_its_find_device(its, dev_id);
	if (!IS_ERR(its_dev)) {
		pr_err("A device with this DeviceID (0x%x) has already been registered.\n",
		       dev_id);

		return ERR_PTR(-EBUSY);
	}

	/* Zeroed allocation of a struct gicv5_its_dev. */
	its_dev = kzalloc_obj(*its_dev);
	if (!its_dev)
		return ERR_PTR(-ENOMEM);

	its_dev->device_id = dev_id;
	its_dev->num_events = nvec;

	ret = gicv5_its_device_register(its, its_dev);
	if (ret) {
		pr_err("Failed to register the device\n");
		goto out_dev_free;
	}

	its_dev->its_node = its;

	/* One bit per possible EventID; all free initially. */
	its_dev->event_map = (unsigned long *)bitmap_zalloc(its_dev->num_events, GFP_KERNEL);
	if (!its_dev->event_map) {
		ret = -ENOMEM;
		goto out_unregister;
	}

	entry = xa_store(&its->its_devices, dev_id, its_dev, GFP_KERNEL);
	if (xa_is_err(entry)) {
		ret = xa_err(entry);
		goto out_bitmap_free;
	}

	return its_dev;

out_bitmap_free:
	bitmap_free(its_dev->event_map);
out_unregister:
	/* Tears down ITT and DTE programmed by device_register(). */
	gicv5_its_device_unregister(its, its_dev);
out_dev_free:
	kfree(its_dev);
	return ERR_PTR(ret);
}
797
/*
 * MSI domain .msi_prepare: allocate the per-device ITS state for @nvec
 * vectors. scratchpad[0] carries the DeviceID in and the device pointer
 * out; scratchpad[1] carries the ITS translate frame physical address.
 */
static int gicv5_its_msi_prepare(struct irq_domain *domain, struct device *dev,
				 int nvec, msi_alloc_info_t *info)
{
	u32 dev_id = info->scratchpad[0].ul;
	struct msi_domain_info *msi_info;
	struct gicv5_its_chip_data *its;
	struct gicv5_its_dev *its_dev;

	msi_info = msi_get_domain_info(domain);
	its = msi_info->data;

	/* Serialise against concurrent device alloc/teardown. */
	guard(mutex)(&its->dev_alloc_lock);

	its_dev = gicv5_its_alloc_device(its, nvec, dev_id);
	if (IS_ERR(its_dev))
		return PTR_ERR(its_dev);

	its_dev->its_trans_phys_base = info->scratchpad[1].ul;
	info->scratchpad[0].ptr = its_dev;

	return 0;
}
820
/*
 * MSI domain .msi_teardown: undo gicv5_its_msi_prepare(). Refuses (and
 * leaks, with a WARN) if events are still mapped for the device.
 */
static void gicv5_its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info)
{
	struct gicv5_its_dev *its_dev = info->scratchpad[0].ptr;
	struct msi_domain_info *msi_info;
	struct gicv5_its_chip_data *its;

	msi_info = msi_get_domain_info(domain);
	its = msi_info->data;

	guard(mutex)(&its->dev_alloc_lock);

	/* All EventIDs must have been released before teardown. */
	if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map, its_dev->num_events)))
		return;

	xa_erase(&its->its_devices, its_dev->device_id);
	bitmap_free(its_dev->event_map);
	gicv5_its_device_unregister(its, its_dev);
	kfree(its_dev);
}
840
/* Per-device ITS resources are created/destroyed in prepare/teardown. */
static struct msi_domain_ops gicv5_its_msi_domain_ops = {
	.msi_prepare	= gicv5_its_msi_prepare,
	.msi_teardown	= gicv5_its_msi_teardown,
};
845
/*
 * Map an EventID to an LPI by writing a valid ITT entry, then invalidate
 * the ITS's cached translation for that event.
 *
 * Returns -EEXIST if the event is already mapped, 0 otherwise.
 */
static int gicv5_its_map_event(struct gicv5_its_dev *its_dev, u16 event_id, u32 lpi)
{
	struct gicv5_its_chip_data *its = its_dev->its_node;
	u64 itt_entry;
	__le64 *itte;

	itte = gicv5_its_device_get_itte_ref(its_dev, event_id);

	if (FIELD_GET(GICV5_ITTL2E_VALID, le64_to_cpu(*itte)))
		return -EEXIST;

	itt_entry = FIELD_PREP(GICV5_ITTL2E_LPI_ID, lpi) |
		    FIELD_PREP(GICV5_ITTL2E_VALID, 0x1);

	its_write_table_entry(its, itte, itt_entry);

	gicv5_its_itt_cache_inv(its, its_dev->device_id, event_id);

	return 0;
}
866
/*
 * Unmap an EventID: clear the VALID bit in its ITT entry (keeping the rest
 * of the entry intact) and invalidate the cached translation.
 */
static void gicv5_its_unmap_event(struct gicv5_its_dev *its_dev, u16 event_id)
{
	struct gicv5_its_chip_data *its = its_dev->its_node;
	u64 itte_val;
	__le64 *itte;

	itte = gicv5_its_device_get_itte_ref(its_dev, event_id);

	itte_val = le64_to_cpu(*itte);
	itte_val &= ~GICV5_ITTL2E_VALID;

	its_write_table_entry(its, itte, itte_val);

	gicv5_its_itt_cache_inv(its, its_dev->device_id, event_id);
}
882
/*
 * Reserve a contiguous, power-of-two-aligned range of @nr_irqs EventIDs
 * from the device's bitmap, returning the base in *@eventid.
 *
 * With MSI_ALLOC_FLAGS_FIXED_MSG_DATA the EventID is dictated by the
 * caller (info->hwirq) and only a single vector is allowed.
 */
static int gicv5_its_alloc_eventid(struct gicv5_its_dev *its_dev, msi_alloc_info_t *info,
				   unsigned int nr_irqs, u32 *eventid)
{
	int event_id_base;

	if (!(info->flags & MSI_ALLOC_FLAGS_FIXED_MSG_DATA)) {
		event_id_base = bitmap_find_free_region(its_dev->event_map,
							its_dev->num_events,
							get_count_order(nr_irqs));
		if (event_id_base < 0)
			return event_id_base;
	} else {
		/*
		 * We want to have a fixed EventID mapped for hardcoded
		 * message data allocations.
		 */
		if (WARN_ON_ONCE(nr_irqs != 1))
			return -EINVAL;

		event_id_base = info->hwirq;

		if (event_id_base >= its_dev->num_events) {
			pr_err("EventID outside of ITT range; cannot allocate an ITT entry!\n");

			return -EINVAL;
		}

		if (test_and_set_bit(event_id_base, its_dev->event_map)) {
			pr_warn("Can't reserve event_id bitmap\n");
			return -EINVAL;

		}
	}

	*eventid = event_id_base;

	return 0;
}
921
/* Return a previously reserved EventID range to the device's bitmap. */
static void gicv5_its_free_eventid(struct gicv5_its_dev *its_dev, u32 event_id_base,
				   unsigned int nr_irqs)
{
	int order = get_count_order(nr_irqs);

	bitmap_release_region(its_dev->event_map, event_id_base, order);
}
928
/*
 * irq_domain .alloc: reserve EventIDs, allocate one LPI per vector from the
 * parent domain, and encode (DeviceID, EventID) into each hwirq.
 *
 * On failure, already-allocated vectors are unwound in reverse order and
 * the EventID range is released.
 */
static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				      unsigned int nr_irqs, void *arg)
{
	u32 device_id, event_id_base, lpi;
	struct gicv5_its_dev *its_dev;
	msi_alloc_info_t *info = arg;
	irq_hw_number_t hwirq;
	struct irq_data *irqd;
	int ret, i;

	/* Stashed by gicv5_its_msi_prepare(). */
	its_dev = info->scratchpad[0].ptr;

	ret = gicv5_its_alloc_eventid(its_dev, info, nr_irqs, &event_id_base);
	if (ret)
		return ret;

	/* Let the IOMMU layer map the ITS translate frame for the device. */
	ret = iommu_dma_prepare_msi(info->desc, its_dev->its_trans_phys_base);
	if (ret)
		goto out_eventid;

	device_id = its_dev->device_id;

	for (i = 0; i < nr_irqs; i++) {
		ret = gicv5_alloc_lpi();
		if (ret < 0) {
			pr_debug("Failed to find free LPI!\n");
			goto out_free_irqs;
		}
		lpi = ret;

		ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1, &lpi);
		if (ret) {
			/* Parent alloc failed: the LPI isn't tracked yet. */
			gicv5_free_lpi(lpi);
			goto out_free_irqs;
		}

		/*
		 * Store eventid and deviceid into the hwirq for later use.
		 *
		 *	hwirq  = event_id << 32 | device_id
		 */
		hwirq = FIELD_PREP(GICV5_ITS_HWIRQ_DEVICE_ID, device_id) |
			FIELD_PREP(GICV5_ITS_HWIRQ_EVENT_ID, (u64)event_id_base + i);
		irq_domain_set_info(domain, virq + i, hwirq,
				    &gicv5_its_irq_chip, its_dev,
				    handle_fasteoi_irq, NULL, NULL);

		irqd = irq_get_irq_data(virq + i);
		irqd_set_single_target(irqd);
		irqd_set_affinity_on_activate(irqd);
	}

	return 0;

out_free_irqs:
	/* Unwind vectors 0..i-1: free LPI, reset data, free from parent. */
	while (--i >= 0) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		gicv5_free_lpi(irqd->parent_data->hwirq);
		irq_domain_reset_irq_data(irqd);
		irq_domain_free_irqs_parent(domain, virq + i, 1);
	}
out_eventid:
	gicv5_its_free_eventid(its_dev, event_id_base, nr_irqs);
	return ret;
}
994
/*
 * irq_domain .free: release the EventID range, free each vector's LPI and
 * parent irq data, then SYNC the ITS and the IRS so outstanding
 * translations for the freed events are drained.
 */
static void gicv5_its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				      unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct gicv5_its_chip_data *its;
	struct gicv5_its_dev *its_dev;
	u16 event_id_base;
	unsigned int i;

	its_dev = irq_data_get_irq_chip_data(d);
	its = its_dev->its_node;

	/* The first vector's hwirq encodes the base EventID of the range. */
	event_id_base = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq);

	bitmap_release_region(its_dev->event_map, event_id_base,
			      get_count_order(nr_irqs));

	/* Hierarchically free irq data */
	for (i = 0; i < nr_irqs; i++) {
		d = irq_domain_get_irq_data(domain, virq + i);

		gicv5_free_lpi(d->parent_data->hwirq);
		irq_domain_reset_irq_data(d);
		irq_domain_free_irqs_parent(domain, virq + i, 1);
	}

	gicv5_its_syncr(its, its_dev);
	gicv5_irs_syncr();
}
1024
/*
 * irq_domain .activate callback: program the ITT so that the event ID
 * encoded in d->hwirq is translated to the LPI allocated in the parent
 * domain.
 */
static int gicv5_its_irq_domain_activate(struct irq_domain *domain, struct irq_data *d,
					 bool reserve)
{
	struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d);
	u32 lpi = d->parent_data->hwirq;
	u16 event_id = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq);

	return gicv5_its_map_event(its_dev, event_id, lpi);
}
1037
gicv5_its_irq_domain_deactivate(struct irq_domain * domain,struct irq_data * d)1038 static void gicv5_its_irq_domain_deactivate(struct irq_domain *domain,
1039 struct irq_data *d)
1040 {
1041 struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d);
1042 u16 event_id;
1043
1044 event_id = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq);
1045
1046 gicv5_its_unmap_event(its_dev, event_id);
1047 }
1048
/* Hierarchical irq_domain ops for the per-ITS MSI parent domain. */
static const struct irq_domain_ops gicv5_its_irq_domain_ops = {
	.alloc		= gicv5_its_irq_domain_alloc,
	.free		= gicv5_its_irq_domain_free,
	.activate	= gicv5_its_irq_domain_activate,
	.deactivate	= gicv5_its_irq_domain_deactivate,
	.select		= msi_lib_irq_domain_select,
};
1056
/*
 * Write the ITSEN bit of ITS_CR0 and wait for the ITS to report the
 * operation complete (IDLE bit set).
 */
static int gicv5_its_write_cr0(struct gicv5_its_chip_data *its, bool enable)
{
	its_writel_relaxed(its, FIELD_PREP(GICV5_ITS_CR0_ITSEN, enable), GICV5_ITS_CR0);

	return gicv5_wait_for_op_atomic(its->its_base, GICV5_ITS_CR0,
					GICV5_ITS_CR0_IDLE, NULL);
}
1065
/* Enable the ITS and wait for the write to take effect. */
static int gicv5_its_enable(struct gicv5_its_chip_data *its)
{
	return gicv5_its_write_cr0(its, true);
}
1070
/* Disable the ITS and wait for the write to take effect. */
static int gicv5_its_disable(struct gicv5_its_chip_data *its)
{
	return gicv5_its_write_cr0(its, false);
}
1075
gicv5_its_print_info(struct gicv5_its_chip_data * its_node)1076 static void gicv5_its_print_info(struct gicv5_its_chip_data *its_node)
1077 {
1078 bool devtab_linear;
1079 u8 device_id_bits;
1080 u8 str;
1081
1082 device_id_bits = devtab_cfgr_field(its_node, DEVICEID_BITS);
1083
1084 str = devtab_cfgr_field(its_node, STRUCTURE);
1085 devtab_linear = (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR);
1086
1087 pr_info("ITS %s enabled using %s device table device_id_bits %u\n",
1088 fwnode_get_name(its_node->fwnode),
1089 devtab_linear ? "linear" : "2-level",
1090 device_id_bits);
1091 }
1092
/*
 * Create the MSI parent irq domain for @its on top of @parent (the LPI
 * domain).
 *
 * The msi_domain_info is heap-allocated because each ITS carries its own
 * chip data pointer in info->data; it is freed if domain creation fails.
 *
 * Return: 0 on success, -ENOMEM on allocation or domain creation failure.
 */
static int gicv5_its_init_domain(struct gicv5_its_chip_data *its, struct irq_domain *parent)
{
	struct irq_domain_info dom_info = {
		.fwnode		= its->fwnode,
		.ops		= &gicv5_its_irq_domain_ops,
		.domain_flags	= its->msi_domain_flags,
		.parent		= parent,
	};
	struct msi_domain_info *info;

	info = kzalloc_obj(*info);
	if (!info)
		return -ENOMEM;

	info->ops = &gicv5_its_msi_domain_ops;
	info->data = its;
	dom_info.host_data = info;

	if (!msi_create_parent_irq_domain(&dom_info, &gic_v5_its_msi_parent_ops)) {
		kfree(info);
		return -ENOMEM;
	}

	return 0;
}
1118
gicv5_its_init_bases(void __iomem * its_base,struct fwnode_handle * handle,struct irq_domain * parent_domain,bool noncoherent)1119 static int __init gicv5_its_init_bases(void __iomem *its_base, struct fwnode_handle *handle,
1120 struct irq_domain *parent_domain, bool noncoherent)
1121 {
1122 struct device_node *np = to_of_node(handle);
1123 struct gicv5_its_chip_data *its_node;
1124 u32 cr0, cr1;
1125 bool enabled;
1126 int ret;
1127
1128 its_node = kzalloc_obj(*its_node);
1129 if (!its_node)
1130 return -ENOMEM;
1131
1132 mutex_init(&its_node->dev_alloc_lock);
1133 xa_init(&its_node->its_devices);
1134 its_node->fwnode = handle;
1135 its_node->its_base = its_base;
1136 its_node->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI |
1137 IRQ_DOMAIN_FLAG_FWNODE_PARENT;
1138
1139 cr0 = its_readl_relaxed(its_node, GICV5_ITS_CR0);
1140 enabled = FIELD_GET(GICV5_ITS_CR0_ITSEN, cr0);
1141 if (WARN(enabled, "ITS %s enabled, disabling it before proceeding\n", np->full_name)) {
1142 ret = gicv5_its_disable(its_node);
1143 if (ret)
1144 goto out_free_node;
1145 }
1146
1147 if (of_property_read_bool(np, "dma-noncoherent")) {
1148 /*
1149 * A non-coherent ITS implies that some cache levels cannot be
1150 * used coherently by the cores and GIC. Our only option is to mark
1151 * memory attributes for the GIC as non-cacheable; by default,
1152 * non-cacheable memory attributes imply outer-shareable
1153 * shareability, the value written into ITS_CR1_SH is ignored.
1154 */
1155 cr1 = FIELD_PREP(GICV5_ITS_CR1_ITT_RA, GICV5_NO_READ_ALLOC) |
1156 FIELD_PREP(GICV5_ITS_CR1_DT_RA, GICV5_NO_READ_ALLOC) |
1157 FIELD_PREP(GICV5_ITS_CR1_IC, GICV5_NON_CACHE) |
1158 FIELD_PREP(GICV5_ITS_CR1_OC, GICV5_NON_CACHE);
1159 its_node->flags |= ITS_FLAGS_NON_COHERENT;
1160 } else {
1161 cr1 = FIELD_PREP(GICV5_ITS_CR1_ITT_RA, GICV5_READ_ALLOC) |
1162 FIELD_PREP(GICV5_ITS_CR1_DT_RA, GICV5_READ_ALLOC) |
1163 FIELD_PREP(GICV5_ITS_CR1_IC, GICV5_WB_CACHE) |
1164 FIELD_PREP(GICV5_ITS_CR1_OC, GICV5_WB_CACHE) |
1165 FIELD_PREP(GICV5_ITS_CR1_SH, GICV5_INNER_SHARE);
1166 }
1167
1168 its_writel_relaxed(its_node, cr1, GICV5_ITS_CR1);
1169
1170 ret = gicv5_its_init_devtab(its_node);
1171 if (ret)
1172 goto out_free_node;
1173
1174 ret = gicv5_its_enable(its_node);
1175 if (ret)
1176 goto out_free_devtab;
1177
1178 ret = gicv5_its_init_domain(its_node, parent_domain);
1179 if (ret)
1180 goto out_disable_its;
1181
1182 gicv5_its_print_info(its_node);
1183
1184 return 0;
1185
1186 out_disable_its:
1187 gicv5_its_disable(its_node);
1188 out_free_devtab:
1189 gicv5_its_deinit_devtab(its_node);
1190 out_free_node:
1191 kfree(its_node);
1192 return ret;
1193 }
1194
/*
 * Map the ITS "ns-config" register frame described by @node and bring
 * the ITS up on top of the global LPI domain.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __init gicv5_its_init(struct device_node *node)
{
	void __iomem *its_base;
	int ret, idx;

	/* The config frame is identified by the "ns-config" reg-name. */
	idx = of_property_match_string(node, "reg-names", "ns-config");
	if (idx < 0) {
		pr_err("%pOF: ns-config reg-name not present\n", node);
		return -ENODEV;
	}

	its_base = of_io_request_and_map(node, idx, of_node_full_name(node));
	if (IS_ERR(its_base)) {
		pr_err("%pOF: unable to map GICv5 ITS_CONFIG_FRAME\n", node);
		return PTR_ERR(its_base);
	}

	ret = gicv5_its_init_bases(its_base, of_fwnode_handle(node),
				   gicv5_global_data.lpi_domain,
				   of_property_read_bool(node, "dma-noncoherent"));
	if (ret)
		goto out_unmap;

	return 0;

out_unmap:
	iounmap(its_base);
	return ret;
}
1224
gicv5_its_of_probe(struct device_node * parent)1225 void __init gicv5_its_of_probe(struct device_node *parent)
1226 {
1227 struct device_node *np;
1228
1229 for_each_available_child_of_node(parent, np) {
1230 if (!of_device_is_compatible(np, "arm,gic-v5-its"))
1231 continue;
1232
1233 if (gicv5_its_init(np))
1234 pr_err("Failed to init ITS %s\n", np->full_name);
1235 }
1236 }
1237
1238 #ifdef CONFIG_ACPI
1239
/* Size of an ITS config/translate frame mapping (one 64K frame). */
#define ACPI_GICV5_ITS_MEM_SIZE		(SZ_64K)

/*
 * MADT entry and fwnode of the ITS currently being probed; consumed by
 * the translate-frame subtable callbacks below. __initdata: only valid
 * during early init table parsing.
 */
static struct acpi_madt_gicv5_translator *current_its_entry __initdata;
static struct fwnode_handle *current_its_fwnode __initdata;
1244
/*
 * MADT TRANSLATE subtable callback: for each translate frame linked to
 * the ITS currently being probed (current_its_entry), allocate a fwnode
 * parented to the ITS fwnode and register it with IORT so devices can be
 * resolved to this frame.
 *
 * Return: 0 on success or when the entry belongs to another translator
 * (so the MADT walk continues), negative errno on failure.
 */
static int __init gic_acpi_parse_madt_its_translate(union acpi_subtable_headers *header,
						    const unsigned long end)
{
	struct acpi_madt_gicv5_translate_frame *its_frame;
	struct fwnode_handle *msi_dom_handle;
	struct resource res = {};
	int err;

	its_frame = (struct acpi_madt_gicv5_translate_frame *)header;
	/* Skip frames linked to a different ITS. */
	if (its_frame->linked_translator_id != current_its_entry->translator_id)
		return 0;

	res.start = its_frame->base_address;
	res.end = its_frame->base_address + ACPI_GICV5_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	msi_dom_handle = irq_domain_alloc_parented_fwnode(&res.start, current_its_fwnode);
	if (!msi_dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv5 ITS translate domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_frame->translate_frame_id, res.start,
					 msi_dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv5 ITS domain token (ITS TRANSLATE FRAME ID %d) to IORT\n",
		       &res.start, its_frame->translate_frame_id);
		irq_domain_free_fwnode(msi_dom_handle);
		return err;
	}

	return 0;
}
1279
gic_acpi_free_madt_its_translate(union acpi_subtable_headers * header,const unsigned long end)1280 static int __init gic_acpi_free_madt_its_translate(union acpi_subtable_headers *header,
1281 const unsigned long end)
1282 {
1283 struct acpi_madt_gicv5_translate_frame *its_frame;
1284 struct fwnode_handle *msi_dom_handle;
1285
1286 its_frame = (struct acpi_madt_gicv5_translate_frame *)header;
1287 if (its_frame->linked_translator_id != current_its_entry->translator_id)
1288 return 0;
1289
1290 msi_dom_handle = iort_find_domain_token(its_frame->translate_frame_id);
1291 if (!msi_dom_handle)
1292 return 0;
1293
1294 iort_deregister_domain_token(its_frame->translate_frame_id);
1295 irq_domain_free_fwnode(msi_dom_handle);
1296
1297 return 0;
1298 }
1299
/*
 * MADT GICV5_ITS subtable callback: probe one ITS described by ACPI.
 *
 * Claims and maps the ITS config frame, allocates a fwnode for it,
 * registers all linked translate frames with IORT, then performs the
 * common bring-up via gicv5_its_init_bases(). On failure, everything is
 * unwound in reverse order (unmap, deregister translate frames, free
 * fwnode, release the memory region).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_madt_gicv5_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct resource res = {};
	void __iomem *its_base;
	int err;

	its_entry = (struct acpi_madt_gicv5_translator *)header;
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV5_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	if (!request_mem_region(res.start, resource_size(&res), "GICv5 ITS"))
		return -EBUSY;

	dom_handle = irq_domain_alloc_fwnode(&res.start);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv5 ITS domain token\n",
		       &res.start);
		err = -ENOMEM;
		goto out_rel_res;
	}

	/* Publish state consumed by the translate-frame MADT callbacks. */
	current_its_entry = its_entry;
	current_its_fwnode = dom_handle;

	acpi_table_parse_madt(ACPI_MADT_TYPE_GICV5_ITS_TRANSLATE,
			      gic_acpi_parse_madt_its_translate, 0);

	its_base = ioremap(res.start, ACPI_GICV5_ITS_MEM_SIZE);
	if (!its_base) {
		err = -ENOMEM;
		goto out_unregister;
	}

	err = gicv5_its_init_bases(its_base, dom_handle, gicv5_global_data.lpi_domain,
				   its_entry->flags & ACPI_MADT_GICV5_ITS_NON_COHERENT);
	if (err)
		goto out_unmap;

	return 0;

out_unmap:
	iounmap(its_base);
out_unregister:
	acpi_table_parse_madt(ACPI_MADT_TYPE_GICV5_ITS_TRANSLATE,
			      gic_acpi_free_madt_its_translate, 0);
	irq_domain_free_fwnode(dom_handle);
out_rel_res:
	release_mem_region(res.start, resource_size(&res));
	return err;
}
1354
/* Probe every GICv5 ITS described in the ACPI MADT. */
void __init gicv5_its_acpi_probe(void)
{
	acpi_table_parse_madt(ACPI_MADT_TYPE_GICV5_ITS, gic_acpi_parse_madt_its, 0);
}
1359 #else
/* !CONFIG_ACPI stub: nothing to probe. */
void __init gicv5_its_acpi_probe(void) { }
1361 #endif
1362