1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved. 4 * Author: Marc Zyngier <marc.zyngier@arm.com> 5 */ 6 7 #include <linux/acpi.h> 8 #include <linux/acpi_iort.h> 9 #include <linux/bitmap.h> 10 #include <linux/cpu.h> 11 #include <linux/crash_dump.h> 12 #include <linux/delay.h> 13 #include <linux/dma-iommu.h> 14 #include <linux/efi.h> 15 #include <linux/interrupt.h> 16 #include <linux/irqdomain.h> 17 #include <linux/list.h> 18 #include <linux/log2.h> 19 #include <linux/memblock.h> 20 #include <linux/mm.h> 21 #include <linux/msi.h> 22 #include <linux/of.h> 23 #include <linux/of_address.h> 24 #include <linux/of_irq.h> 25 #include <linux/of_pci.h> 26 #include <linux/of_platform.h> 27 #include <linux/percpu.h> 28 #include <linux/slab.h> 29 #include <linux/syscore_ops.h> 30 31 #include <linux/irqchip.h> 32 #include <linux/irqchip/arm-gic-v3.h> 33 #include <linux/irqchip/arm-gic-v4.h> 34 35 #include <asm/cputype.h> 36 #include <asm/exception.h> 37 38 #include "irq-gic-common.h" 39 40 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) 41 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) 42 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) 43 #define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3) 44 45 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) 46 #define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) 47 48 static u32 lpi_id_bits; 49 50 /* 51 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to 52 * deal with (one configuration byte per interrupt). PENDBASE has to 53 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). 54 */ 55 #define LPI_NRBITS lpi_id_bits 56 #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) 57 #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) 58 59 #define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI 60 61 /* 62 * Collection structure - just an ID, and a redistributor address to 63 * ping. We use one per CPU as a bag of interrupts assigned to this 64 * CPU. 65 */ 66 struct its_collection { 67 u64 target_address; 68 u16 col_id; 69 }; 70 71 /* 72 * The ITS_BASER structure - contains memory information, cached 73 * value of BASER register configuration and ITS page size. 74 */ 75 struct its_baser { 76 void *base; 77 u64 val; 78 u32 order; 79 u32 psz; 80 }; 81 82 struct its_device; 83 84 /* 85 * The ITS structure - contains most of the infrastructure, with the 86 * top-level MSI domain, the command queue, the collections, and the 87 * list of devices writing to it. 88 * 89 * dev_alloc_lock has to be taken for device allocations, while the 90 * spinlock must be taken to parse data structures such as the device 91 * list. 
92 */ 93 struct its_node { 94 raw_spinlock_t lock; 95 struct mutex dev_alloc_lock; 96 struct list_head entry; 97 void __iomem *base; 98 phys_addr_t phys_base; 99 struct its_cmd_block *cmd_base; 100 struct its_cmd_block *cmd_write; 101 struct its_baser tables[GITS_BASER_NR_REGS]; 102 struct its_collection *collections; 103 struct fwnode_handle *fwnode_handle; 104 u64 (*get_msi_base)(struct its_device *its_dev); 105 u64 cbaser_save; 106 u32 ctlr_save; 107 struct list_head its_device_list; 108 u64 flags; 109 unsigned long list_nr; 110 u32 ite_size; 111 u32 device_ids; 112 int numa_node; 113 unsigned int msi_domain_flags; 114 u32 pre_its_base; /* for Socionext Synquacer */ 115 bool is_v4; 116 int vlpi_redist_offset; 117 }; 118 119 #define ITS_ITT_ALIGN SZ_256 120 121 /* The maximum number of VPEID bits supported by VLPI commands */ 122 #define ITS_MAX_VPEID_BITS (16) 123 #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) 124 125 /* Convert page order to size in bytes */ 126 #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) 127 128 struct event_lpi_map { 129 unsigned long *lpi_map; 130 u16 *col_map; 131 irq_hw_number_t lpi_base; 132 int nr_lpis; 133 struct mutex vlpi_lock; 134 struct its_vm *vm; 135 struct its_vlpi_map *vlpi_maps; 136 int nr_vlpis; 137 }; 138 139 /* 140 * The ITS view of a device - belongs to an ITS, owns an interrupt 141 * translation table, and a list of interrupts. If it some of its 142 * LPIs are injected into a guest (GICv4), the event_map.vm field 143 * indicates which one. 144 */ 145 struct its_device { 146 struct list_head entry; 147 struct its_node *its; 148 struct event_lpi_map event_map; 149 void *itt; 150 u32 nr_ites; 151 u32 device_id; 152 bool shared; 153 }; 154 155 static struct { 156 raw_spinlock_t lock; 157 struct its_device *dev; 158 struct its_vpe **vpes; 159 int next_victim; 160 } vpe_proxy; 161 162 static LIST_HEAD(its_nodes); 163 static DEFINE_RAW_SPINLOCK(its_lock); 164 static struct rdists *gic_rdists; 165 static struct irq_domain *its_parent; 166 167 static unsigned long its_list_map; 168 static u16 vmovp_seq_num; 169 static DEFINE_RAW_SPINLOCK(vmovp_lock); 170 171 static DEFINE_IDA(its_vpeid_ida); 172 173 #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) 174 #define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) 175 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) 176 #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) 177 178 static struct its_collection *dev_event_to_col(struct its_device *its_dev, 179 u32 event) 180 { 181 struct its_node *its = its_dev->its; 182 183 return its->collections + its_dev->event_map.col_map[event]; 184 } 185 186 static struct its_collection *valid_col(struct its_collection *col) 187 { 188 if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15))) 189 return NULL; 190 191 return col; 192 } 193 194 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) 195 { 196 if (valid_col(its->collections + vpe->col_idx)) 197 return vpe; 198 199 return NULL; 200 } 201 202 /* 203 * ITS command descriptors - parameters to be encoded in a command 204 * block. 
205 */ 206 struct its_cmd_desc { 207 union { 208 struct { 209 struct its_device *dev; 210 u32 event_id; 211 } its_inv_cmd; 212 213 struct { 214 struct its_device *dev; 215 u32 event_id; 216 } its_clear_cmd; 217 218 struct { 219 struct its_device *dev; 220 u32 event_id; 221 } its_int_cmd; 222 223 struct { 224 struct its_device *dev; 225 int valid; 226 } its_mapd_cmd; 227 228 struct { 229 struct its_collection *col; 230 int valid; 231 } its_mapc_cmd; 232 233 struct { 234 struct its_device *dev; 235 u32 phys_id; 236 u32 event_id; 237 } its_mapti_cmd; 238 239 struct { 240 struct its_device *dev; 241 struct its_collection *col; 242 u32 event_id; 243 } its_movi_cmd; 244 245 struct { 246 struct its_device *dev; 247 u32 event_id; 248 } its_discard_cmd; 249 250 struct { 251 struct its_collection *col; 252 } its_invall_cmd; 253 254 struct { 255 struct its_vpe *vpe; 256 } its_vinvall_cmd; 257 258 struct { 259 struct its_vpe *vpe; 260 struct its_collection *col; 261 bool valid; 262 } its_vmapp_cmd; 263 264 struct { 265 struct its_vpe *vpe; 266 struct its_device *dev; 267 u32 virt_id; 268 u32 event_id; 269 bool db_enabled; 270 } its_vmapti_cmd; 271 272 struct { 273 struct its_vpe *vpe; 274 struct its_device *dev; 275 u32 event_id; 276 bool db_enabled; 277 } its_vmovi_cmd; 278 279 struct { 280 struct its_vpe *vpe; 281 struct its_collection *col; 282 u16 seq_num; 283 u16 its_list; 284 } its_vmovp_cmd; 285 }; 286 }; 287 288 /* 289 * The ITS command block, which is what the ITS actually parses. 290 */ 291 struct its_cmd_block { 292 u64 raw_cmd[4]; 293 }; 294 295 #define ITS_CMD_QUEUE_SZ SZ_64K 296 #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) 297 298 typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *, 299 struct its_cmd_block *, 300 struct its_cmd_desc *); 301 302 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *, 303 struct its_cmd_block *, 304 struct its_cmd_desc *); 305 306 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) 307 { 308 u64 mask = GENMASK_ULL(h, l); 309 *raw_cmd &= ~mask; 310 *raw_cmd |= (val << l) & mask; 311 } 312 313 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) 314 { 315 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0); 316 } 317 318 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) 319 { 320 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32); 321 } 322 323 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) 324 { 325 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0); 326 } 327 328 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) 329 { 330 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32); 331 } 332 333 static void its_encode_size(struct its_cmd_block *cmd, u8 size) 334 { 335 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0); 336 } 337 338 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) 339 { 340 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8); 341 } 342 343 static void its_encode_valid(struct its_cmd_block *cmd, int valid) 344 { 345 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63); 346 } 347 348 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) 349 { 350 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16); 351 } 352 353 static void its_encode_collection(struct its_cmd_block *cmd, u16 col) 354 { 355 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0); 356 } 357 358 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid) 359 { 360 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 
32); 361 } 362 363 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id) 364 { 365 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0); 366 } 367 368 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id) 369 { 370 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32); 371 } 372 373 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid) 374 { 375 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0); 376 } 377 378 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num) 379 { 380 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32); 381 } 382 383 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list) 384 { 385 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0); 386 } 387 388 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) 389 { 390 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16); 391 } 392 393 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) 394 { 395 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); 396 } 397 398 static inline void its_fixup_cmd(struct its_cmd_block *cmd) 399 { 400 /* Let's fixup BE commands */ 401 cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); 402 cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); 403 cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); 404 cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); 405 } 406 407 static struct its_collection *its_build_mapd_cmd(struct its_node *its, 408 struct its_cmd_block *cmd, 409 struct its_cmd_desc *desc) 410 { 411 unsigned long itt_addr; 412 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); 413 414 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); 415 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); 416 417 its_encode_cmd(cmd, GITS_CMD_MAPD); 418 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); 419 its_encode_size(cmd, size - 1); 420 its_encode_itt(cmd, itt_addr); 421 its_encode_valid(cmd, desc->its_mapd_cmd.valid); 422 423 its_fixup_cmd(cmd); 424 425 return NULL; 426 } 427 428 static struct its_collection *its_build_mapc_cmd(struct its_node *its, 429 struct its_cmd_block *cmd, 430 struct its_cmd_desc *desc) 431 { 432 its_encode_cmd(cmd, GITS_CMD_MAPC); 433 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); 434 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); 435 its_encode_valid(cmd, desc->its_mapc_cmd.valid); 436 437 its_fixup_cmd(cmd); 438 439 return desc->its_mapc_cmd.col; 440 } 441 442 static struct its_collection *its_build_mapti_cmd(struct its_node *its, 443 struct its_cmd_block *cmd, 444 struct its_cmd_desc *desc) 445 { 446 struct its_collection *col; 447 448 col = dev_event_to_col(desc->its_mapti_cmd.dev, 449 desc->its_mapti_cmd.event_id); 450 451 its_encode_cmd(cmd, GITS_CMD_MAPTI); 452 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id); 453 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id); 454 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id); 455 its_encode_collection(cmd, col->col_id); 456 457 its_fixup_cmd(cmd); 458 459 return valid_col(col); 460 } 461 462 static struct its_collection *its_build_movi_cmd(struct its_node *its, 463 struct its_cmd_block *cmd, 464 struct its_cmd_desc *desc) 465 { 466 struct its_collection *col; 467 468 col = dev_event_to_col(desc->its_movi_cmd.dev, 469 desc->its_movi_cmd.event_id); 470 471 its_encode_cmd(cmd, GITS_CMD_MOVI); 472 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); 473 its_encode_event_id(cmd, desc->its_movi_cmd.event_id); 474 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); 475 
476 its_fixup_cmd(cmd); 477 478 return valid_col(col); 479 } 480 481 static struct its_collection *its_build_discard_cmd(struct its_node *its, 482 struct its_cmd_block *cmd, 483 struct its_cmd_desc *desc) 484 { 485 struct its_collection *col; 486 487 col = dev_event_to_col(desc->its_discard_cmd.dev, 488 desc->its_discard_cmd.event_id); 489 490 its_encode_cmd(cmd, GITS_CMD_DISCARD); 491 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); 492 its_encode_event_id(cmd, desc->its_discard_cmd.event_id); 493 494 its_fixup_cmd(cmd); 495 496 return valid_col(col); 497 } 498 499 static struct its_collection *its_build_inv_cmd(struct its_node *its, 500 struct its_cmd_block *cmd, 501 struct its_cmd_desc *desc) 502 { 503 struct its_collection *col; 504 505 col = dev_event_to_col(desc->its_inv_cmd.dev, 506 desc->its_inv_cmd.event_id); 507 508 its_encode_cmd(cmd, GITS_CMD_INV); 509 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); 510 its_encode_event_id(cmd, desc->its_inv_cmd.event_id); 511 512 its_fixup_cmd(cmd); 513 514 return valid_col(col); 515 } 516 517 static struct its_collection *its_build_int_cmd(struct its_node *its, 518 struct its_cmd_block *cmd, 519 struct its_cmd_desc *desc) 520 { 521 struct its_collection *col; 522 523 col = dev_event_to_col(desc->its_int_cmd.dev, 524 desc->its_int_cmd.event_id); 525 526 its_encode_cmd(cmd, GITS_CMD_INT); 527 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); 528 its_encode_event_id(cmd, desc->its_int_cmd.event_id); 529 530 its_fixup_cmd(cmd); 531 532 return valid_col(col); 533 } 534 535 static struct its_collection *its_build_clear_cmd(struct its_node *its, 536 struct its_cmd_block *cmd, 537 struct its_cmd_desc *desc) 538 { 539 struct its_collection *col; 540 541 col = dev_event_to_col(desc->its_clear_cmd.dev, 542 desc->its_clear_cmd.event_id); 543 544 its_encode_cmd(cmd, GITS_CMD_CLEAR); 545 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); 546 its_encode_event_id(cmd, desc->its_clear_cmd.event_id); 547 548 its_fixup_cmd(cmd); 549 550 return valid_col(col); 551 } 552 553 static struct its_collection *its_build_invall_cmd(struct its_node *its, 554 struct its_cmd_block *cmd, 555 struct its_cmd_desc *desc) 556 { 557 its_encode_cmd(cmd, GITS_CMD_INVALL); 558 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); 559 560 its_fixup_cmd(cmd); 561 562 return NULL; 563 } 564 565 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, 566 struct its_cmd_block *cmd, 567 struct its_cmd_desc *desc) 568 { 569 its_encode_cmd(cmd, GITS_CMD_VINVALL); 570 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); 571 572 its_fixup_cmd(cmd); 573 574 return valid_vpe(its, desc->its_vinvall_cmd.vpe); 575 } 576 577 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, 578 struct its_cmd_block *cmd, 579 struct its_cmd_desc *desc) 580 { 581 unsigned long vpt_addr; 582 u64 target; 583 584 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); 585 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; 586 587 its_encode_cmd(cmd, GITS_CMD_VMAPP); 588 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); 589 its_encode_valid(cmd, desc->its_vmapp_cmd.valid); 590 its_encode_target(cmd, target); 591 its_encode_vpt_addr(cmd, vpt_addr); 592 its_encode_vpt_size(cmd, LPI_NRBITS - 1); 593 594 its_fixup_cmd(cmd); 595 596 return valid_vpe(its, desc->its_vmapp_cmd.vpe); 597 } 598 599 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, 600 struct its_cmd_block *cmd, 601 struct 
its_cmd_desc *desc) 602 { 603 u32 db; 604 605 if (desc->its_vmapti_cmd.db_enabled) 606 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; 607 else 608 db = 1023; 609 610 its_encode_cmd(cmd, GITS_CMD_VMAPTI); 611 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id); 612 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); 613 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id); 614 its_encode_db_phys_id(cmd, db); 615 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id); 616 617 its_fixup_cmd(cmd); 618 619 return valid_vpe(its, desc->its_vmapti_cmd.vpe); 620 } 621 622 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, 623 struct its_cmd_block *cmd, 624 struct its_cmd_desc *desc) 625 { 626 u32 db; 627 628 if (desc->its_vmovi_cmd.db_enabled) 629 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; 630 else 631 db = 1023; 632 633 its_encode_cmd(cmd, GITS_CMD_VMOVI); 634 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id); 635 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); 636 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id); 637 its_encode_db_phys_id(cmd, db); 638 its_encode_db_valid(cmd, true); 639 640 its_fixup_cmd(cmd); 641 642 return valid_vpe(its, desc->its_vmovi_cmd.vpe); 643 } 644 645 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, 646 struct its_cmd_block *cmd, 647 struct its_cmd_desc *desc) 648 { 649 u64 target; 650 651 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; 652 its_encode_cmd(cmd, GITS_CMD_VMOVP); 653 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); 654 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); 655 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); 656 its_encode_target(cmd, target); 657 658 its_fixup_cmd(cmd); 659 660 return valid_vpe(its, desc->its_vmovp_cmd.vpe); 661 } 662 663 static u64 its_cmd_ptr_to_offset(struct its_node *its, 664 struct its_cmd_block *ptr) 665 { 666 return (ptr - its->cmd_base) * sizeof(*ptr); 667 } 668 669 static int its_queue_full(struct its_node *its) 670 { 671 int widx; 672 int ridx; 673 674 widx = its->cmd_write - its->cmd_base; 675 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); 676 677 /* This is incredibly unlikely to happen, unless the ITS locks up. */ 678 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) 679 return 1; 680 681 return 0; 682 } 683 684 static struct its_cmd_block *its_allocate_entry(struct its_node *its) 685 { 686 struct its_cmd_block *cmd; 687 u32 count = 1000000; /* 1s! */ 688 689 while (its_queue_full(its)) { 690 count--; 691 if (!count) { 692 pr_err_ratelimited("ITS queue not draining\n"); 693 return NULL; 694 } 695 cpu_relax(); 696 udelay(1); 697 } 698 699 cmd = its->cmd_write++; 700 701 /* Handle queue wrapping */ 702 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) 703 its->cmd_write = its->cmd_base; 704 705 /* Clear command */ 706 cmd->raw_cmd[0] = 0; 707 cmd->raw_cmd[1] = 0; 708 cmd->raw_cmd[2] = 0; 709 cmd->raw_cmd[3] = 0; 710 711 return cmd; 712 } 713 714 static struct its_cmd_block *its_post_commands(struct its_node *its) 715 { 716 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); 717 718 writel_relaxed(wr, its->base + GITS_CWRITER); 719 720 return its->cmd_write; 721 } 722 723 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) 724 { 725 /* 726 * Make sure the commands written to memory are observable by 727 * the ITS. 
728 */ 729 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) 730 gic_flush_dcache_to_poc(cmd, sizeof(*cmd)); 731 else 732 dsb(ishst); 733 } 734 735 static int its_wait_for_range_completion(struct its_node *its, 736 struct its_cmd_block *from, 737 struct its_cmd_block *to) 738 { 739 u64 rd_idx, from_idx, to_idx; 740 u32 count = 1000000; /* 1s! */ 741 742 from_idx = its_cmd_ptr_to_offset(its, from); 743 to_idx = its_cmd_ptr_to_offset(its, to); 744 745 while (1) { 746 rd_idx = readl_relaxed(its->base + GITS_CREADR); 747 748 /* Direct case */ 749 if (from_idx < to_idx && rd_idx >= to_idx) 750 break; 751 752 /* Wrapped case */ 753 if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx) 754 break; 755 756 count--; 757 if (!count) { 758 pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n", 759 from_idx, to_idx, rd_idx); 760 return -1; 761 } 762 cpu_relax(); 763 udelay(1); 764 } 765 766 return 0; 767 } 768 769 /* Warning, macro hell follows */ 770 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \ 771 void name(struct its_node *its, \ 772 buildtype builder, \ 773 struct its_cmd_desc *desc) \ 774 { \ 775 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ 776 synctype *sync_obj; \ 777 unsigned long flags; \ 778 \ 779 raw_spin_lock_irqsave(&its->lock, flags); \ 780 \ 781 cmd = its_allocate_entry(its); \ 782 if (!cmd) { /* We're soooooo screewed... */ \ 783 raw_spin_unlock_irqrestore(&its->lock, flags); \ 784 return; \ 785 } \ 786 sync_obj = builder(its, cmd, desc); \ 787 its_flush_cmd(its, cmd); \ 788 \ 789 if (sync_obj) { \ 790 sync_cmd = its_allocate_entry(its); \ 791 if (!sync_cmd) \ 792 goto post; \ 793 \ 794 buildfn(its, sync_cmd, sync_obj); \ 795 its_flush_cmd(its, sync_cmd); \ 796 } \ 797 \ 798 post: \ 799 next_cmd = its_post_commands(its); \ 800 raw_spin_unlock_irqrestore(&its->lock, flags); \ 801 \ 802 if (its_wait_for_range_completion(its, cmd, next_cmd)) \ 803 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ 804 } 805 806 static void its_build_sync_cmd(struct its_node *its, 807 struct its_cmd_block *sync_cmd, 808 struct its_collection *sync_col) 809 { 810 its_encode_cmd(sync_cmd, GITS_CMD_SYNC); 811 its_encode_target(sync_cmd, sync_col->target_address); 812 813 its_fixup_cmd(sync_cmd); 814 } 815 816 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, 817 struct its_collection, its_build_sync_cmd) 818 819 static void its_build_vsync_cmd(struct its_node *its, 820 struct its_cmd_block *sync_cmd, 821 struct its_vpe *sync_vpe) 822 { 823 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); 824 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id); 825 826 its_fixup_cmd(sync_cmd); 827 } 828 829 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t, 830 struct its_vpe, its_build_vsync_cmd) 831 832 static void its_send_int(struct its_device *dev, u32 event_id) 833 { 834 struct its_cmd_desc desc; 835 836 desc.its_int_cmd.dev = dev; 837 desc.its_int_cmd.event_id = event_id; 838 839 its_send_single_command(dev->its, its_build_int_cmd, &desc); 840 } 841 842 static void its_send_clear(struct its_device *dev, u32 event_id) 843 { 844 struct its_cmd_desc desc; 845 846 desc.its_clear_cmd.dev = dev; 847 desc.its_clear_cmd.event_id = event_id; 848 849 its_send_single_command(dev->its, its_build_clear_cmd, &desc); 850 } 851 852 static void its_send_inv(struct its_device *dev, u32 event_id) 853 { 854 struct its_cmd_desc desc; 855 856 desc.its_inv_cmd.dev = dev; 857 desc.its_inv_cmd.event_id = event_id; 858 859 its_send_single_command(dev->its, 
its_build_inv_cmd, &desc); 860 } 861 862 static void its_send_mapd(struct its_device *dev, int valid) 863 { 864 struct its_cmd_desc desc; 865 866 desc.its_mapd_cmd.dev = dev; 867 desc.its_mapd_cmd.valid = !!valid; 868 869 its_send_single_command(dev->its, its_build_mapd_cmd, &desc); 870 } 871 872 static void its_send_mapc(struct its_node *its, struct its_collection *col, 873 int valid) 874 { 875 struct its_cmd_desc desc; 876 877 desc.its_mapc_cmd.col = col; 878 desc.its_mapc_cmd.valid = !!valid; 879 880 its_send_single_command(its, its_build_mapc_cmd, &desc); 881 } 882 883 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id) 884 { 885 struct its_cmd_desc desc; 886 887 desc.its_mapti_cmd.dev = dev; 888 desc.its_mapti_cmd.phys_id = irq_id; 889 desc.its_mapti_cmd.event_id = id; 890 891 its_send_single_command(dev->its, its_build_mapti_cmd, &desc); 892 } 893 894 static void its_send_movi(struct its_device *dev, 895 struct its_collection *col, u32 id) 896 { 897 struct its_cmd_desc desc; 898 899 desc.its_movi_cmd.dev = dev; 900 desc.its_movi_cmd.col = col; 901 desc.its_movi_cmd.event_id = id; 902 903 its_send_single_command(dev->its, its_build_movi_cmd, &desc); 904 } 905 906 static void its_send_discard(struct its_device *dev, u32 id) 907 { 908 struct its_cmd_desc desc; 909 910 desc.its_discard_cmd.dev = dev; 911 desc.its_discard_cmd.event_id = id; 912 913 its_send_single_command(dev->its, its_build_discard_cmd, &desc); 914 } 915 916 static void its_send_invall(struct its_node *its, struct its_collection *col) 917 { 918 struct its_cmd_desc desc; 919 920 desc.its_invall_cmd.col = col; 921 922 its_send_single_command(its, its_build_invall_cmd, &desc); 923 } 924 925 static void its_send_vmapti(struct its_device *dev, u32 id) 926 { 927 struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; 928 struct its_cmd_desc desc; 929 930 desc.its_vmapti_cmd.vpe = map->vpe; 931 desc.its_vmapti_cmd.dev = dev; 932 desc.its_vmapti_cmd.virt_id = map->vintid; 933 desc.its_vmapti_cmd.event_id = id; 934 desc.its_vmapti_cmd.db_enabled = map->db_enabled; 935 936 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); 937 } 938 939 static void its_send_vmovi(struct its_device *dev, u32 id) 940 { 941 struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; 942 struct its_cmd_desc desc; 943 944 desc.its_vmovi_cmd.vpe = map->vpe; 945 desc.its_vmovi_cmd.dev = dev; 946 desc.its_vmovi_cmd.event_id = id; 947 desc.its_vmovi_cmd.db_enabled = map->db_enabled; 948 949 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); 950 } 951 952 static void its_send_vmapp(struct its_node *its, 953 struct its_vpe *vpe, bool valid) 954 { 955 struct its_cmd_desc desc; 956 957 desc.its_vmapp_cmd.vpe = vpe; 958 desc.its_vmapp_cmd.valid = valid; 959 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; 960 961 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); 962 } 963 964 static void its_send_vmovp(struct its_vpe *vpe) 965 { 966 struct its_cmd_desc desc; 967 struct its_node *its; 968 unsigned long flags; 969 int col_id = vpe->col_idx; 970 971 desc.its_vmovp_cmd.vpe = vpe; 972 desc.its_vmovp_cmd.its_list = (u16)its_list_map; 973 974 if (!its_list_map) { 975 its = list_first_entry(&its_nodes, struct its_node, entry); 976 desc.its_vmovp_cmd.seq_num = 0; 977 desc.its_vmovp_cmd.col = &its->collections[col_id]; 978 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); 979 return; 980 } 981 982 /* 983 * Yet another marvel of the architecture. 
If using the 984 * its_list "feature", we need to make sure that all ITSs 985 * receive all VMOVP commands in the same order. The only way 986 * to guarantee this is to make vmovp a serialization point. 987 * 988 * Wall <-- Head. 989 */ 990 raw_spin_lock_irqsave(&vmovp_lock, flags); 991 992 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; 993 994 /* Emit VMOVPs */ 995 list_for_each_entry(its, &its_nodes, entry) { 996 if (!its->is_v4) 997 continue; 998 999 if (!vpe->its_vm->vlpi_count[its->list_nr]) 1000 continue; 1001 1002 desc.its_vmovp_cmd.col = &its->collections[col_id]; 1003 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); 1004 } 1005 1006 raw_spin_unlock_irqrestore(&vmovp_lock, flags); 1007 } 1008 1009 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) 1010 { 1011 struct its_cmd_desc desc; 1012 1013 desc.its_vinvall_cmd.vpe = vpe; 1014 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); 1015 } 1016 1017 /* 1018 * irqchip functions - assumes MSI, mostly. 1019 */ 1020 1021 static inline u32 its_get_event_id(struct irq_data *d) 1022 { 1023 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1024 return d->hwirq - its_dev->event_map.lpi_base; 1025 } 1026 1027 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) 1028 { 1029 irq_hw_number_t hwirq; 1030 void *va; 1031 u8 *cfg; 1032 1033 if (irqd_is_forwarded_to_vcpu(d)) { 1034 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1035 u32 event = its_get_event_id(d); 1036 struct its_vlpi_map *map; 1037 1038 va = page_address(its_dev->event_map.vm->vprop_page); 1039 map = &its_dev->event_map.vlpi_maps[event]; 1040 hwirq = map->vintid; 1041 1042 /* Remember the updated property */ 1043 map->properties &= ~clr; 1044 map->properties |= set | LPI_PROP_GROUP1; 1045 } else { 1046 va = gic_rdists->prop_table_va; 1047 hwirq = d->hwirq; 1048 } 1049 1050 cfg = va + hwirq - 8192; 1051 *cfg &= ~clr; 1052 *cfg |= set | LPI_PROP_GROUP1; 1053 1054 /* 1055 * Make the above write visible to the redistributors. 1056 * And yes, we're flushing exactly: One. Single. Byte. 1057 * Humpf... 1058 */ 1059 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) 1060 gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); 1061 else 1062 dsb(ishst); 1063 } 1064 1065 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) 1066 { 1067 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1068 1069 lpi_write_config(d, clr, set); 1070 its_send_inv(its_dev, its_get_event_id(d)); 1071 } 1072 1073 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) 1074 { 1075 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1076 u32 event = its_get_event_id(d); 1077 1078 if (its_dev->event_map.vlpi_maps[event].db_enabled == enable) 1079 return; 1080 1081 its_dev->event_map.vlpi_maps[event].db_enabled = enable; 1082 1083 /* 1084 * More fun with the architecture: 1085 * 1086 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI 1087 * value or to 1023, depending on the enable bit. But that 1088 * would be issueing a mapping for an /existing/ DevID+EventID 1089 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI 1090 * to the /same/ vPE, using this opportunity to adjust the 1091 * doorbell. Mouahahahaha. We loves it, Precious. 
1092 */ 1093 its_send_vmovi(its_dev, event); 1094 } 1095 1096 static void its_mask_irq(struct irq_data *d) 1097 { 1098 if (irqd_is_forwarded_to_vcpu(d)) 1099 its_vlpi_set_doorbell(d, false); 1100 1101 lpi_update_config(d, LPI_PROP_ENABLED, 0); 1102 } 1103 1104 static void its_unmask_irq(struct irq_data *d) 1105 { 1106 if (irqd_is_forwarded_to_vcpu(d)) 1107 its_vlpi_set_doorbell(d, true); 1108 1109 lpi_update_config(d, 0, LPI_PROP_ENABLED); 1110 } 1111 1112 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 1113 bool force) 1114 { 1115 unsigned int cpu; 1116 const struct cpumask *cpu_mask = cpu_online_mask; 1117 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1118 struct its_collection *target_col; 1119 u32 id = its_get_event_id(d); 1120 1121 /* A forwarded interrupt should use irq_set_vcpu_affinity */ 1122 if (irqd_is_forwarded_to_vcpu(d)) 1123 return -EINVAL; 1124 1125 /* lpi cannot be routed to a redistributor that is on a foreign node */ 1126 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { 1127 if (its_dev->its->numa_node >= 0) { 1128 cpu_mask = cpumask_of_node(its_dev->its->numa_node); 1129 if (!cpumask_intersects(mask_val, cpu_mask)) 1130 return -EINVAL; 1131 } 1132 } 1133 1134 cpu = cpumask_any_and(mask_val, cpu_mask); 1135 1136 if (cpu >= nr_cpu_ids) 1137 return -EINVAL; 1138 1139 /* don't set the affinity when the target cpu is same as current one */ 1140 if (cpu != its_dev->event_map.col_map[id]) { 1141 target_col = &its_dev->its->collections[cpu]; 1142 its_send_movi(its_dev, target_col, id); 1143 its_dev->event_map.col_map[id] = cpu; 1144 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 1145 } 1146 1147 return IRQ_SET_MASK_OK_DONE; 1148 } 1149 1150 static u64 its_irq_get_msi_base(struct its_device *its_dev) 1151 { 1152 struct its_node *its = its_dev->its; 1153 1154 return its->phys_base + GITS_TRANSLATER; 1155 } 1156 1157 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) 1158 { 1159 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1160 struct its_node *its; 1161 u64 addr; 1162 1163 its = its_dev->its; 1164 addr = its->get_msi_base(its_dev); 1165 1166 msg->address_lo = lower_32_bits(addr); 1167 msg->address_hi = upper_32_bits(addr); 1168 msg->data = its_get_event_id(d); 1169 1170 iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg); 1171 } 1172 1173 static int its_irq_set_irqchip_state(struct irq_data *d, 1174 enum irqchip_irq_state which, 1175 bool state) 1176 { 1177 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1178 u32 event = its_get_event_id(d); 1179 1180 if (which != IRQCHIP_STATE_PENDING) 1181 return -EINVAL; 1182 1183 if (state) 1184 its_send_int(its_dev, event); 1185 else 1186 its_send_clear(its_dev, event); 1187 1188 return 0; 1189 } 1190 1191 static void its_map_vm(struct its_node *its, struct its_vm *vm) 1192 { 1193 unsigned long flags; 1194 1195 /* Not using the ITS list? Everything is always mapped. */ 1196 if (!its_list_map) 1197 return; 1198 1199 raw_spin_lock_irqsave(&vmovp_lock, flags); 1200 1201 /* 1202 * If the VM wasn't mapped yet, iterate over the vpes and get 1203 * them mapped now. 
1204 */ 1205 vm->vlpi_count[its->list_nr]++; 1206 1207 if (vm->vlpi_count[its->list_nr] == 1) { 1208 int i; 1209 1210 for (i = 0; i < vm->nr_vpes; i++) { 1211 struct its_vpe *vpe = vm->vpes[i]; 1212 struct irq_data *d = irq_get_irq_data(vpe->irq); 1213 1214 /* Map the VPE to the first possible CPU */ 1215 vpe->col_idx = cpumask_first(cpu_online_mask); 1216 its_send_vmapp(its, vpe, true); 1217 its_send_vinvall(its, vpe); 1218 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); 1219 } 1220 } 1221 1222 raw_spin_unlock_irqrestore(&vmovp_lock, flags); 1223 } 1224 1225 static void its_unmap_vm(struct its_node *its, struct its_vm *vm) 1226 { 1227 unsigned long flags; 1228 1229 /* Not using the ITS list? Everything is always mapped. */ 1230 if (!its_list_map) 1231 return; 1232 1233 raw_spin_lock_irqsave(&vmovp_lock, flags); 1234 1235 if (!--vm->vlpi_count[its->list_nr]) { 1236 int i; 1237 1238 for (i = 0; i < vm->nr_vpes; i++) 1239 its_send_vmapp(its, vm->vpes[i], false); 1240 } 1241 1242 raw_spin_unlock_irqrestore(&vmovp_lock, flags); 1243 } 1244 1245 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) 1246 { 1247 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1248 u32 event = its_get_event_id(d); 1249 int ret = 0; 1250 1251 if (!info->map) 1252 return -EINVAL; 1253 1254 mutex_lock(&its_dev->event_map.vlpi_lock); 1255 1256 if (!its_dev->event_map.vm) { 1257 struct its_vlpi_map *maps; 1258 1259 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), 1260 GFP_KERNEL); 1261 if (!maps) { 1262 ret = -ENOMEM; 1263 goto out; 1264 } 1265 1266 its_dev->event_map.vm = info->map->vm; 1267 its_dev->event_map.vlpi_maps = maps; 1268 } else if (its_dev->event_map.vm != info->map->vm) { 1269 ret = -EINVAL; 1270 goto out; 1271 } 1272 1273 /* Get our private copy of the mapping information */ 1274 its_dev->event_map.vlpi_maps[event] = *info->map; 1275 1276 if (irqd_is_forwarded_to_vcpu(d)) { 1277 /* Already mapped, move it around */ 1278 its_send_vmovi(its_dev, event); 1279 } else { 1280 /* Ensure all the VPEs are mapped on this ITS */ 1281 its_map_vm(its_dev->its, info->map->vm); 1282 1283 /* 1284 * Flag the interrupt as forwarded so that we can 1285 * start poking the virtual property table. 
1286 */ 1287 irqd_set_forwarded_to_vcpu(d); 1288 1289 /* Write out the property to the prop table */ 1290 lpi_write_config(d, 0xff, info->map->properties); 1291 1292 /* Drop the physical mapping */ 1293 its_send_discard(its_dev, event); 1294 1295 /* and install the virtual one */ 1296 its_send_vmapti(its_dev, event); 1297 1298 /* Increment the number of VLPIs */ 1299 its_dev->event_map.nr_vlpis++; 1300 } 1301 1302 out: 1303 mutex_unlock(&its_dev->event_map.vlpi_lock); 1304 return ret; 1305 } 1306 1307 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) 1308 { 1309 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1310 u32 event = its_get_event_id(d); 1311 int ret = 0; 1312 1313 mutex_lock(&its_dev->event_map.vlpi_lock); 1314 1315 if (!its_dev->event_map.vm || 1316 !its_dev->event_map.vlpi_maps[event].vm) { 1317 ret = -EINVAL; 1318 goto out; 1319 } 1320 1321 /* Copy our mapping information to the incoming request */ 1322 *info->map = its_dev->event_map.vlpi_maps[event]; 1323 1324 out: 1325 mutex_unlock(&its_dev->event_map.vlpi_lock); 1326 return ret; 1327 } 1328 1329 static int its_vlpi_unmap(struct irq_data *d) 1330 { 1331 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1332 u32 event = its_get_event_id(d); 1333 int ret = 0; 1334 1335 mutex_lock(&its_dev->event_map.vlpi_lock); 1336 1337 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { 1338 ret = -EINVAL; 1339 goto out; 1340 } 1341 1342 /* Drop the virtual mapping */ 1343 its_send_discard(its_dev, event); 1344 1345 /* and restore the physical one */ 1346 irqd_clr_forwarded_to_vcpu(d); 1347 its_send_mapti(its_dev, d->hwirq, event); 1348 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | 1349 LPI_PROP_ENABLED | 1350 LPI_PROP_GROUP1)); 1351 1352 /* Potentially unmap the VM from this ITS */ 1353 its_unmap_vm(its_dev->its, its_dev->event_map.vm); 1354 1355 /* 1356 * Drop the refcount and make the device available again if 1357 * this was the last VLPI. 1358 */ 1359 if (!--its_dev->event_map.nr_vlpis) { 1360 its_dev->event_map.vm = NULL; 1361 kfree(its_dev->event_map.vlpi_maps); 1362 } 1363 1364 out: 1365 mutex_unlock(&its_dev->event_map.vlpi_lock); 1366 return ret; 1367 } 1368 1369 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) 1370 { 1371 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1372 1373 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) 1374 return -EINVAL; 1375 1376 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI) 1377 lpi_update_config(d, 0xff, info->config); 1378 else 1379 lpi_write_config(d, 0xff, info->config); 1380 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED)); 1381 1382 return 0; 1383 } 1384 1385 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) 1386 { 1387 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1388 struct its_cmd_info *info = vcpu_info; 1389 1390 /* Need a v4 ITS */ 1391 if (!its_dev->its->is_v4) 1392 return -EINVAL; 1393 1394 /* Unmap request? 
*/ 1395 if (!info) 1396 return its_vlpi_unmap(d); 1397 1398 switch (info->cmd_type) { 1399 case MAP_VLPI: 1400 return its_vlpi_map(d, info); 1401 1402 case GET_VLPI: 1403 return its_vlpi_get(d, info); 1404 1405 case PROP_UPDATE_VLPI: 1406 case PROP_UPDATE_AND_INV_VLPI: 1407 return its_vlpi_prop_update(d, info); 1408 1409 default: 1410 return -EINVAL; 1411 } 1412 } 1413 1414 static struct irq_chip its_irq_chip = { 1415 .name = "ITS", 1416 .irq_mask = its_mask_irq, 1417 .irq_unmask = its_unmask_irq, 1418 .irq_eoi = irq_chip_eoi_parent, 1419 .irq_set_affinity = its_set_affinity, 1420 .irq_compose_msi_msg = its_irq_compose_msi_msg, 1421 .irq_set_irqchip_state = its_irq_set_irqchip_state, 1422 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, 1423 }; 1424 1425 1426 /* 1427 * How we allocate LPIs: 1428 * 1429 * lpi_range_list contains ranges of LPIs that are to available to 1430 * allocate from. To allocate LPIs, just pick the first range that 1431 * fits the required allocation, and reduce it by the required 1432 * amount. Once empty, remove the range from the list. 1433 * 1434 * To free a range of LPIs, add a free range to the list, sort it and 1435 * merge the result if the new range happens to be adjacent to an 1436 * already free block. 1437 * 1438 * The consequence of the above is that allocation is cost is low, but 1439 * freeing is expensive. We assumes that freeing rarely occurs. 1440 */ 1441 #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ 1442 1443 static DEFINE_MUTEX(lpi_range_lock); 1444 static LIST_HEAD(lpi_range_list); 1445 1446 struct lpi_range { 1447 struct list_head entry; 1448 u32 base_id; 1449 u32 span; 1450 }; 1451 1452 static struct lpi_range *mk_lpi_range(u32 base, u32 span) 1453 { 1454 struct lpi_range *range; 1455 1456 range = kmalloc(sizeof(*range), GFP_KERNEL); 1457 if (range) { 1458 range->base_id = base; 1459 range->span = span; 1460 } 1461 1462 return range; 1463 } 1464 1465 static int alloc_lpi_range(u32 nr_lpis, u32 *base) 1466 { 1467 struct lpi_range *range, *tmp; 1468 int err = -ENOSPC; 1469 1470 mutex_lock(&lpi_range_lock); 1471 1472 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { 1473 if (range->span >= nr_lpis) { 1474 *base = range->base_id; 1475 range->base_id += nr_lpis; 1476 range->span -= nr_lpis; 1477 1478 if (range->span == 0) { 1479 list_del(&range->entry); 1480 kfree(range); 1481 } 1482 1483 err = 0; 1484 break; 1485 } 1486 } 1487 1488 mutex_unlock(&lpi_range_lock); 1489 1490 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis); 1491 return err; 1492 } 1493 1494 static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b) 1495 { 1496 if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list) 1497 return; 1498 if (a->base_id + a->span != b->base_id) 1499 return; 1500 b->base_id = a->base_id; 1501 b->span += a->span; 1502 list_del(&a->entry); 1503 kfree(a); 1504 } 1505 1506 static int free_lpi_range(u32 base, u32 nr_lpis) 1507 { 1508 struct lpi_range *new, *old; 1509 1510 new = mk_lpi_range(base, nr_lpis); 1511 if (!new) 1512 return -ENOMEM; 1513 1514 mutex_lock(&lpi_range_lock); 1515 1516 list_for_each_entry_reverse(old, &lpi_range_list, entry) { 1517 if (old->base_id < base) 1518 break; 1519 } 1520 /* 1521 * old is the last element with ->base_id smaller than base, 1522 * so new goes right after it. If there are no elements with 1523 * ->base_id smaller than base, &old->entry ends up pointing 1524 * at the head of the list, and inserting new it the start of 1525 * the list is the right thing to do in that case as well. 
1526 */ 1527 list_add(&new->entry, &old->entry); 1528 /* 1529 * Now check if we can merge with the preceding and/or 1530 * following ranges. 1531 */ 1532 merge_lpi_ranges(old, new); 1533 merge_lpi_ranges(new, list_next_entry(new, entry)); 1534 1535 mutex_unlock(&lpi_range_lock); 1536 return 0; 1537 } 1538 1539 static int __init its_lpi_init(u32 id_bits) 1540 { 1541 u32 lpis = (1UL << id_bits) - 8192; 1542 u32 numlpis; 1543 int err; 1544 1545 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer); 1546 1547 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) { 1548 lpis = numlpis; 1549 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n", 1550 lpis); 1551 } 1552 1553 /* 1554 * Initializing the allocator is just the same as freeing the 1555 * full range of LPIs. 1556 */ 1557 err = free_lpi_range(8192, lpis); 1558 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); 1559 return err; 1560 } 1561 1562 static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) 1563 { 1564 unsigned long *bitmap = NULL; 1565 int err = 0; 1566 1567 do { 1568 err = alloc_lpi_range(nr_irqs, base); 1569 if (!err) 1570 break; 1571 1572 nr_irqs /= 2; 1573 } while (nr_irqs > 0); 1574 1575 if (!nr_irqs) 1576 err = -ENOSPC; 1577 1578 if (err) 1579 goto out; 1580 1581 bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC); 1582 if (!bitmap) 1583 goto out; 1584 1585 *nr_ids = nr_irqs; 1586 1587 out: 1588 if (!bitmap) 1589 *base = *nr_ids = 0; 1590 1591 return bitmap; 1592 } 1593 1594 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) 1595 { 1596 WARN_ON(free_lpi_range(base, nr_ids)); 1597 kfree(bitmap); 1598 } 1599 1600 static void gic_reset_prop_table(void *va) 1601 { 1602 /* Priority 0xa0, Group-1, disabled */ 1603 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); 1604 1605 /* Make sure the GIC will observe the written configuration */ 1606 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); 1607 } 1608 1609 static struct page *its_allocate_prop_table(gfp_t gfp_flags) 1610 { 1611 struct page *prop_page; 1612 1613 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); 1614 if (!prop_page) 1615 return NULL; 1616 1617 gic_reset_prop_table(page_address(prop_page)); 1618 1619 return prop_page; 1620 } 1621 1622 static void its_free_prop_table(struct page *prop_page) 1623 { 1624 free_pages((unsigned long)page_address(prop_page), 1625 get_order(LPI_PROPBASE_SZ)); 1626 } 1627 1628 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) 1629 { 1630 phys_addr_t start, end, addr_end; 1631 u64 i; 1632 1633 /* 1634 * We don't bother checking for a kdump kernel as by 1635 * construction, the LPI tables are out of this kernel's 1636 * memory map. 1637 */ 1638 if (is_kdump_kernel()) 1639 return true; 1640 1641 addr_end = addr + size - 1; 1642 1643 for_each_reserved_mem_region(i, &start, &end) { 1644 if (addr >= start && addr_end <= end) 1645 return true; 1646 } 1647 1648 /* Not found, not a good sign... 
*/ 1649 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n", 1650 &addr, &addr_end); 1651 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); 1652 return false; 1653 } 1654 1655 static int gic_reserve_range(phys_addr_t addr, unsigned long size) 1656 { 1657 if (efi_enabled(EFI_CONFIG_TABLES)) 1658 return efi_mem_reserve_persistent(addr, size); 1659 1660 return 0; 1661 } 1662 1663 static int __init its_setup_lpi_prop_table(void) 1664 { 1665 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { 1666 u64 val; 1667 1668 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); 1669 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; 1670 1671 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); 1672 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, 1673 LPI_PROPBASE_SZ, 1674 MEMREMAP_WB); 1675 gic_reset_prop_table(gic_rdists->prop_table_va); 1676 } else { 1677 struct page *page; 1678 1679 lpi_id_bits = min_t(u32, 1680 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), 1681 ITS_MAX_LPI_NRBITS); 1682 page = its_allocate_prop_table(GFP_NOWAIT); 1683 if (!page) { 1684 pr_err("Failed to allocate PROPBASE\n"); 1685 return -ENOMEM; 1686 } 1687 1688 gic_rdists->prop_table_pa = page_to_phys(page); 1689 gic_rdists->prop_table_va = page_address(page); 1690 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, 1691 LPI_PROPBASE_SZ)); 1692 } 1693 1694 pr_info("GICv3: using LPI property table @%pa\n", 1695 &gic_rdists->prop_table_pa); 1696 1697 return its_lpi_init(lpi_id_bits); 1698 } 1699 1700 static const char *its_base_type_string[] = { 1701 [GITS_BASER_TYPE_DEVICE] = "Devices", 1702 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", 1703 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)", 1704 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", 1705 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", 1706 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", 1707 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", 1708 }; 1709 1710 static u64 its_read_baser(struct its_node *its, struct its_baser *baser) 1711 { 1712 u32 idx = baser - its->tables; 1713 1714 return gits_read_baser(its->base + GITS_BASER + (idx << 3)); 1715 } 1716 1717 static void its_write_baser(struct its_node *its, struct its_baser *baser, 1718 u64 val) 1719 { 1720 u32 idx = baser - its->tables; 1721 1722 gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); 1723 baser->val = its_read_baser(its, baser); 1724 } 1725 1726 static int its_setup_baser(struct its_node *its, struct its_baser *baser, 1727 u64 cache, u64 shr, u32 psz, u32 order, 1728 bool indirect) 1729 { 1730 u64 val = its_read_baser(its, baser); 1731 u64 esz = GITS_BASER_ENTRY_SIZE(val); 1732 u64 type = GITS_BASER_TYPE(val); 1733 u64 baser_phys, tmp; 1734 u32 alloc_pages; 1735 struct page *page; 1736 void *base; 1737 1738 retry_alloc_baser: 1739 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); 1740 if (alloc_pages > GITS_BASER_PAGES_MAX) { 1741 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", 1742 &its->phys_base, its_base_type_string[type], 1743 alloc_pages, GITS_BASER_PAGES_MAX); 1744 alloc_pages = GITS_BASER_PAGES_MAX; 1745 order = get_order(GITS_BASER_PAGES_MAX * psz); 1746 } 1747 1748 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); 1749 if (!page) 1750 return -ENOMEM; 1751 1752 base = (void *)page_address(page); 1753 baser_phys = virt_to_phys(base); 1754 1755 /* Check if the physical address of the memory is above 48bits */ 1756 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) { 1757 1758 /* 52bit PA is supported only 
when PageSize=64K */ 1759 if (psz != SZ_64K) { 1760 pr_err("ITS: no 52bit PA support when psz=%d\n", psz); 1761 free_pages((unsigned long)base, order); 1762 return -ENXIO; 1763 } 1764 1765 /* Convert 52bit PA to 48bit field */ 1766 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); 1767 } 1768 1769 retry_baser: 1770 val = (baser_phys | 1771 (type << GITS_BASER_TYPE_SHIFT) | 1772 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | 1773 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | 1774 cache | 1775 shr | 1776 GITS_BASER_VALID); 1777 1778 val |= indirect ? GITS_BASER_INDIRECT : 0x0; 1779 1780 switch (psz) { 1781 case SZ_4K: 1782 val |= GITS_BASER_PAGE_SIZE_4K; 1783 break; 1784 case SZ_16K: 1785 val |= GITS_BASER_PAGE_SIZE_16K; 1786 break; 1787 case SZ_64K: 1788 val |= GITS_BASER_PAGE_SIZE_64K; 1789 break; 1790 } 1791 1792 its_write_baser(its, baser, val); 1793 tmp = baser->val; 1794 1795 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { 1796 /* 1797 * Shareability didn't stick. Just use 1798 * whatever the read reported, which is likely 1799 * to be the only thing this redistributor 1800 * supports. If that's zero, make it 1801 * non-cacheable as well. 1802 */ 1803 shr = tmp & GITS_BASER_SHAREABILITY_MASK; 1804 if (!shr) { 1805 cache = GITS_BASER_nC; 1806 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); 1807 } 1808 goto retry_baser; 1809 } 1810 1811 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { 1812 /* 1813 * Page size didn't stick. Let's try a smaller 1814 * size and retry. If we reach 4K, then 1815 * something is horribly wrong... 1816 */ 1817 free_pages((unsigned long)base, order); 1818 baser->base = NULL; 1819 1820 switch (psz) { 1821 case SZ_16K: 1822 psz = SZ_4K; 1823 goto retry_alloc_baser; 1824 case SZ_64K: 1825 psz = SZ_16K; 1826 goto retry_alloc_baser; 1827 } 1828 } 1829 1830 if (val != tmp) { 1831 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", 1832 &its->phys_base, its_base_type_string[type], 1833 val, tmp); 1834 free_pages((unsigned long)base, order); 1835 return -ENXIO; 1836 } 1837 1838 baser->order = order; 1839 baser->base = base; 1840 baser->psz = psz; 1841 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz; 1842 1843 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", 1844 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp), 1845 its_base_type_string[type], 1846 (unsigned long)virt_to_phys(base), 1847 indirect ? "indirect" : "flat", (int)esz, 1848 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); 1849 1850 return 0; 1851 } 1852 1853 static bool its_parse_indirect_baser(struct its_node *its, 1854 struct its_baser *baser, 1855 u32 psz, u32 *order, u32 ids) 1856 { 1857 u64 tmp = its_read_baser(its, baser); 1858 u64 type = GITS_BASER_TYPE(tmp); 1859 u64 esz = GITS_BASER_ENTRY_SIZE(tmp); 1860 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; 1861 u32 new_order = *order; 1862 bool indirect = false; 1863 1864 /* No need to enable Indirection if memory requirement < (psz*2)bytes */ 1865 if ((esz << ids) > (psz * 2)) { 1866 /* 1867 * Find out whether hw supports a single or two-level table by 1868 * table by reading bit at offset '62' after writing '1' to it. 1869 */ 1870 its_write_baser(its, baser, val | GITS_BASER_INDIRECT); 1871 indirect = !!(baser->val & GITS_BASER_INDIRECT); 1872 1873 if (indirect) { 1874 /* 1875 * The size of the lvl2 table is equal to ITS page size 1876 * which is 'psz'. 
For computing lvl1 table size, 1877 * subtract ID bits that sparse lvl2 table from 'ids' 1878 * which is reported by ITS hardware times lvl1 table 1879 * entry size. 1880 */ 1881 ids -= ilog2(psz / (int)esz); 1882 esz = GITS_LVL1_ENTRY_SIZE; 1883 } 1884 } 1885 1886 /* 1887 * Allocate as many entries as required to fit the 1888 * range of device IDs that the ITS can grok... The ID 1889 * space being incredibly sparse, this results in a 1890 * massive waste of memory if two-level device table 1891 * feature is not supported by hardware. 1892 */ 1893 new_order = max_t(u32, get_order(esz << ids), new_order); 1894 if (new_order >= MAX_ORDER) { 1895 new_order = MAX_ORDER - 1; 1896 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); 1897 pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n", 1898 &its->phys_base, its_base_type_string[type], 1899 its->device_ids, ids); 1900 } 1901 1902 *order = new_order; 1903 1904 return indirect; 1905 } 1906 1907 static void its_free_tables(struct its_node *its) 1908 { 1909 int i; 1910 1911 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 1912 if (its->tables[i].base) { 1913 free_pages((unsigned long)its->tables[i].base, 1914 its->tables[i].order); 1915 its->tables[i].base = NULL; 1916 } 1917 } 1918 } 1919 1920 static int its_alloc_tables(struct its_node *its) 1921 { 1922 u64 shr = GITS_BASER_InnerShareable; 1923 u64 cache = GITS_BASER_RaWaWb; 1924 u32 psz = SZ_64K; 1925 int err, i; 1926 1927 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) 1928 /* erratum 24313: ignore memory access type */ 1929 cache = GITS_BASER_nCnB; 1930 1931 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 1932 struct its_baser *baser = its->tables + i; 1933 u64 val = its_read_baser(its, baser); 1934 u64 type = GITS_BASER_TYPE(val); 1935 u32 order = get_order(psz); 1936 bool indirect = false; 1937 1938 switch (type) { 1939 case GITS_BASER_TYPE_NONE: 1940 continue; 1941 1942 case GITS_BASER_TYPE_DEVICE: 1943 indirect = its_parse_indirect_baser(its, baser, 1944 psz, &order, 1945 its->device_ids); 1946 break; 1947 1948 case GITS_BASER_TYPE_VCPU: 1949 indirect = its_parse_indirect_baser(its, baser, 1950 psz, &order, 1951 ITS_MAX_VPEID_BITS); 1952 break; 1953 } 1954 1955 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); 1956 if (err < 0) { 1957 its_free_tables(its); 1958 return err; 1959 } 1960 1961 /* Update settings which will be used for next BASERn */ 1962 psz = baser->psz; 1963 cache = baser->val & GITS_BASER_CACHEABILITY_MASK; 1964 shr = baser->val & GITS_BASER_SHAREABILITY_MASK; 1965 } 1966 1967 return 0; 1968 } 1969 1970 static int its_alloc_collections(struct its_node *its) 1971 { 1972 int i; 1973 1974 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), 1975 GFP_KERNEL); 1976 if (!its->collections) 1977 return -ENOMEM; 1978 1979 for (i = 0; i < nr_cpu_ids; i++) 1980 its->collections[i].target_address = ~0ULL; 1981 1982 return 0; 1983 } 1984 1985 static struct page *its_allocate_pending_table(gfp_t gfp_flags) 1986 { 1987 struct page *pend_page; 1988 1989 pend_page = alloc_pages(gfp_flags | __GFP_ZERO, 1990 get_order(LPI_PENDBASE_SZ)); 1991 if (!pend_page) 1992 return NULL; 1993 1994 /* Make sure the GIC will observe the zero-ed page */ 1995 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); 1996 1997 return pend_page; 1998 } 1999 2000 static void its_free_pending_table(struct page *pt) 2001 { 2002 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); 2003 } 2004 2005 /* 2006 * Booting with kdump and LPIs enabled is 
generally fine. Any other 2007 * case is wrong in the absence of firmware/EFI support. 2008 */ 2009 static bool enabled_lpis_allowed(void) 2010 { 2011 phys_addr_t addr; 2012 u64 val; 2013 2014 /* Check whether the property table is in a reserved region */ 2015 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); 2016 addr = val & GENMASK_ULL(51, 12); 2017 2018 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ); 2019 } 2020 2021 static int __init allocate_lpi_tables(void) 2022 { 2023 u64 val; 2024 int err, cpu; 2025 2026 /* 2027 * If LPIs are enabled while we run this from the boot CPU, 2028 * flag the RD tables as pre-allocated if the stars do align. 2029 */ 2030 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); 2031 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { 2032 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | 2033 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); 2034 pr_info("GICv3: Using preallocated redistributor tables\n"); 2035 } 2036 2037 err = its_setup_lpi_prop_table(); 2038 if (err) 2039 return err; 2040 2041 /* 2042 * We allocate all the pending tables anyway, as we may have a 2043 * mix of RDs that have had LPIs enabled, and some that 2044 * don't. We'll free the unused ones as each CPU comes online. 2045 */ 2046 for_each_possible_cpu(cpu) { 2047 struct page *pend_page; 2048 2049 pend_page = its_allocate_pending_table(GFP_NOWAIT); 2050 if (!pend_page) { 2051 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); 2052 return -ENOMEM; 2053 } 2054 2055 gic_data_rdist_cpu(cpu)->pend_page = pend_page; 2056 } 2057 2058 return 0; 2059 } 2060 2061 static u64 its_clear_vpend_valid(void __iomem *vlpi_base) 2062 { 2063 u32 count = 1000000; /* 1s! */ 2064 bool clean; 2065 u64 val; 2066 2067 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); 2068 val &= ~GICR_VPENDBASER_Valid; 2069 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); 2070 2071 do { 2072 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); 2073 clean = !(val & GICR_VPENDBASER_Dirty); 2074 if (!clean) { 2075 count--; 2076 cpu_relax(); 2077 udelay(1); 2078 } 2079 } while (!clean && count); 2080 2081 return val; 2082 } 2083 2084 static void its_cpu_init_lpis(void) 2085 { 2086 void __iomem *rbase = gic_data_rdist_rd_base(); 2087 struct page *pend_page; 2088 phys_addr_t paddr; 2089 u64 val, tmp; 2090 2091 if (gic_data_rdist()->lpi_enabled) 2092 return; 2093 2094 val = readl_relaxed(rbase + GICR_CTLR); 2095 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && 2096 (val & GICR_CTLR_ENABLE_LPIS)) { 2097 /* 2098 * Check that we get the same property table on all 2099 * RDs. If we don't, this is hopeless. 
2100 */
2101 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
2102 paddr &= GENMASK_ULL(51, 12);
2103 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
2104 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2105
2106 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2107 paddr &= GENMASK_ULL(51, 16);
2108
2109 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
2110 its_free_pending_table(gic_data_rdist()->pend_page);
2111 gic_data_rdist()->pend_page = NULL;
2112
2113 goto out;
2114 }
2115
2116 pend_page = gic_data_rdist()->pend_page;
2117 paddr = page_to_phys(pend_page);
2118 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
2119
2120 /* set PROPBASE */
2121 val = (gic_rdists->prop_table_pa |
2122 GICR_PROPBASER_InnerShareable |
2123 GICR_PROPBASER_RaWaWb |
2124 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
2125
2126 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2127 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
2128
2129 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
2130 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
2131 /*
2132 * The HW reports non-shareable, we must
2133 * remove the cacheability attributes as
2134 * well.
2135 */
2136 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
2137 GICR_PROPBASER_CACHEABILITY_MASK);
2138 val |= GICR_PROPBASER_nC;
2139 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2140 }
2141 pr_info_once("GIC: using cache flushing for LPI property table\n");
2142 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
2143 }
2144
2145 /* set PENDBASE */
2146 val = (page_to_phys(pend_page) |
2147 GICR_PENDBASER_InnerShareable |
2148 GICR_PENDBASER_RaWaWb);
2149
2150 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2151 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2152
2153 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2154 /*
2155 * The HW reports non-shareable, we must remove the
2156 * cacheability attributes as well.
2157 */
2158 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2159 GICR_PENDBASER_CACHEABILITY_MASK);
2160 val |= GICR_PENDBASER_nC;
2161 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2162 }
2163
2164 /* Enable LPIs */
2165 val = readl_relaxed(rbase + GICR_CTLR);
2166 val |= GICR_CTLR_ENABLE_LPIS;
2167 writel_relaxed(val, rbase + GICR_CTLR);
2168
2169 if (gic_rdists->has_vlpis) {
2170 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2171
2172 /*
2173 * It's possible for the CPU to receive VLPIs before it is
2174 * scheduled as a vPE, especially for the first CPU, and a
2175 * VLPI with an INTID larger than 2^(IDbits+1) will be considered
2176 * out of range and dropped by the GIC.
2177 * So we initialize IDbits to a known value to avoid VLPI drops.
2178 */
2179 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2180 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
2181 smp_processor_id(), val);
2182 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2183
2184 /*
2185 * Also clear the Valid bit of GICR_VPENDBASER, in case some
2186 * ancient programming got left in there and could otherwise
2187 * corrupt memory.
2188 */
2189 val = its_clear_vpend_valid(vlpi_base);
2190 WARN_ON(val & GICR_VPENDBASER_Dirty);
2191 }
2192
2193 /* Make sure the GIC has seen the above */
2194 dsb(sy);
2195 out:
2196 gic_data_rdist()->lpi_enabled = true;
2197 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
2198 smp_processor_id(),
2199 gic_data_rdist()->pend_page ?
"allocated" : "reserved", 2200 &paddr); 2201 } 2202 2203 static void its_cpu_init_collection(struct its_node *its) 2204 { 2205 int cpu = smp_processor_id(); 2206 u64 target; 2207 2208 /* avoid cross node collections and its mapping */ 2209 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { 2210 struct device_node *cpu_node; 2211 2212 cpu_node = of_get_cpu_node(cpu, NULL); 2213 if (its->numa_node != NUMA_NO_NODE && 2214 its->numa_node != of_node_to_nid(cpu_node)) 2215 return; 2216 } 2217 2218 /* 2219 * We now have to bind each collection to its target 2220 * redistributor. 2221 */ 2222 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { 2223 /* 2224 * This ITS wants the physical address of the 2225 * redistributor. 2226 */ 2227 target = gic_data_rdist()->phys_base; 2228 } else { 2229 /* This ITS wants a linear CPU number. */ 2230 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); 2231 target = GICR_TYPER_CPU_NUMBER(target) << 16; 2232 } 2233 2234 /* Perform collection mapping */ 2235 its->collections[cpu].target_address = target; 2236 its->collections[cpu].col_id = cpu; 2237 2238 its_send_mapc(its, &its->collections[cpu], 1); 2239 its_send_invall(its, &its->collections[cpu]); 2240 } 2241 2242 static void its_cpu_init_collections(void) 2243 { 2244 struct its_node *its; 2245 2246 raw_spin_lock(&its_lock); 2247 2248 list_for_each_entry(its, &its_nodes, entry) 2249 its_cpu_init_collection(its); 2250 2251 raw_spin_unlock(&its_lock); 2252 } 2253 2254 static struct its_device *its_find_device(struct its_node *its, u32 dev_id) 2255 { 2256 struct its_device *its_dev = NULL, *tmp; 2257 unsigned long flags; 2258 2259 raw_spin_lock_irqsave(&its->lock, flags); 2260 2261 list_for_each_entry(tmp, &its->its_device_list, entry) { 2262 if (tmp->device_id == dev_id) { 2263 its_dev = tmp; 2264 break; 2265 } 2266 } 2267 2268 raw_spin_unlock_irqrestore(&its->lock, flags); 2269 2270 return its_dev; 2271 } 2272 2273 static struct its_baser *its_get_baser(struct its_node *its, u32 type) 2274 { 2275 int i; 2276 2277 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 2278 if (GITS_BASER_TYPE(its->tables[i].val) == type) 2279 return &its->tables[i]; 2280 } 2281 2282 return NULL; 2283 } 2284 2285 static bool its_alloc_table_entry(struct its_node *its, 2286 struct its_baser *baser, u32 id) 2287 { 2288 struct page *page; 2289 u32 esz, idx; 2290 __le64 *table; 2291 2292 /* Don't allow device id that exceeds single, flat table limit */ 2293 esz = GITS_BASER_ENTRY_SIZE(baser->val); 2294 if (!(baser->val & GITS_BASER_INDIRECT)) 2295 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); 2296 2297 /* Compute 1st level table index & check if that exceeds table limit */ 2298 idx = id >> ilog2(baser->psz / esz); 2299 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) 2300 return false; 2301 2302 table = baser->base; 2303 2304 /* Allocate memory for 2nd level table */ 2305 if (!table[idx]) { 2306 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, 2307 get_order(baser->psz)); 2308 if (!page) 2309 return false; 2310 2311 /* Flush Lvl2 table to PoC if hw doesn't support coherency */ 2312 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) 2313 gic_flush_dcache_to_poc(page_address(page), baser->psz); 2314 2315 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); 2316 2317 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ 2318 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) 2319 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); 2320 2321 /* Ensure 
updated table contents are visible to ITS hardware */ 2322 dsb(sy); 2323 } 2324 2325 return true; 2326 } 2327 2328 static bool its_alloc_device_table(struct its_node *its, u32 dev_id) 2329 { 2330 struct its_baser *baser; 2331 2332 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); 2333 2334 /* Don't allow device id that exceeds ITS hardware limit */ 2335 if (!baser) 2336 return (ilog2(dev_id) < its->device_ids); 2337 2338 return its_alloc_table_entry(its, baser, dev_id); 2339 } 2340 2341 static bool its_alloc_vpe_table(u32 vpe_id) 2342 { 2343 struct its_node *its; 2344 2345 /* 2346 * Make sure the L2 tables are allocated on *all* v4 ITSs. We 2347 * could try and only do it on ITSs corresponding to devices 2348 * that have interrupts targeted at this VPE, but the 2349 * complexity becomes crazy (and you have tons of memory 2350 * anyway, right?). 2351 */ 2352 list_for_each_entry(its, &its_nodes, entry) { 2353 struct its_baser *baser; 2354 2355 if (!its->is_v4) 2356 continue; 2357 2358 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); 2359 if (!baser) 2360 return false; 2361 2362 if (!its_alloc_table_entry(its, baser, vpe_id)) 2363 return false; 2364 } 2365 2366 return true; 2367 } 2368 2369 static struct its_device *its_create_device(struct its_node *its, u32 dev_id, 2370 int nvecs, bool alloc_lpis) 2371 { 2372 struct its_device *dev; 2373 unsigned long *lpi_map = NULL; 2374 unsigned long flags; 2375 u16 *col_map = NULL; 2376 void *itt; 2377 int lpi_base; 2378 int nr_lpis; 2379 int nr_ites; 2380 int sz; 2381 2382 if (!its_alloc_device_table(its, dev_id)) 2383 return NULL; 2384 2385 if (WARN_ON(!is_power_of_2(nvecs))) 2386 nvecs = roundup_pow_of_two(nvecs); 2387 2388 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2389 /* 2390 * Even if the device wants a single LPI, the ITT must be 2391 * sized as a power of two (and you need at least one bit...). 
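 *
 * As a worked example (assuming an 8-byte ITT entry, i.e.
 * its->ite_size == 8, which is just an illustrative value): a device
 * asking for a single MSI still gets nr_ites = max(2, 1) = 2, so
 * sz = 2 * 8 = 16 bytes, padded below to max(16, 256) + 255 = 511
 * bytes so that a 256-byte aligned ITT is guaranteed to fit in the
 * allocation regardless of what alignment kzalloc_node() returns.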
2392 */ 2393 nr_ites = max(2, nvecs); 2394 sz = nr_ites * its->ite_size; 2395 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 2396 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); 2397 if (alloc_lpis) { 2398 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); 2399 if (lpi_map) 2400 col_map = kcalloc(nr_lpis, sizeof(*col_map), 2401 GFP_KERNEL); 2402 } else { 2403 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL); 2404 nr_lpis = 0; 2405 lpi_base = 0; 2406 } 2407 2408 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { 2409 kfree(dev); 2410 kfree(itt); 2411 kfree(lpi_map); 2412 kfree(col_map); 2413 return NULL; 2414 } 2415 2416 gic_flush_dcache_to_poc(itt, sz); 2417 2418 dev->its = its; 2419 dev->itt = itt; 2420 dev->nr_ites = nr_ites; 2421 dev->event_map.lpi_map = lpi_map; 2422 dev->event_map.col_map = col_map; 2423 dev->event_map.lpi_base = lpi_base; 2424 dev->event_map.nr_lpis = nr_lpis; 2425 mutex_init(&dev->event_map.vlpi_lock); 2426 dev->device_id = dev_id; 2427 INIT_LIST_HEAD(&dev->entry); 2428 2429 raw_spin_lock_irqsave(&its->lock, flags); 2430 list_add(&dev->entry, &its->its_device_list); 2431 raw_spin_unlock_irqrestore(&its->lock, flags); 2432 2433 /* Map device to its ITT */ 2434 its_send_mapd(dev, 1); 2435 2436 return dev; 2437 } 2438 2439 static void its_free_device(struct its_device *its_dev) 2440 { 2441 unsigned long flags; 2442 2443 raw_spin_lock_irqsave(&its_dev->its->lock, flags); 2444 list_del(&its_dev->entry); 2445 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); 2446 kfree(its_dev->itt); 2447 kfree(its_dev); 2448 } 2449 2450 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) 2451 { 2452 int idx; 2453 2454 idx = bitmap_find_free_region(dev->event_map.lpi_map, 2455 dev->event_map.nr_lpis, 2456 get_count_order(nvecs)); 2457 if (idx < 0) 2458 return -ENOSPC; 2459 2460 *hwirq = dev->event_map.lpi_base + idx; 2461 set_bit(idx, dev->event_map.lpi_map); 2462 2463 return 0; 2464 } 2465 2466 static int its_msi_prepare(struct irq_domain *domain, struct device *dev, 2467 int nvec, msi_alloc_info_t *info) 2468 { 2469 struct its_node *its; 2470 struct its_device *its_dev; 2471 struct msi_domain_info *msi_info; 2472 u32 dev_id; 2473 int err = 0; 2474 2475 /* 2476 * We ignore "dev" entirely, and rely on the dev_id that has 2477 * been passed via the scratchpad. This limits this domain's 2478 * usefulness to upper layers that definitely know that they 2479 * are built on top of the ITS. 2480 */ 2481 dev_id = info->scratchpad[0].ul; 2482 2483 msi_info = msi_get_domain_info(domain); 2484 its = msi_info->data; 2485 2486 if (!gic_rdists->has_direct_lpi && 2487 vpe_proxy.dev && 2488 vpe_proxy.dev->its == its && 2489 dev_id == vpe_proxy.dev->device_id) { 2490 /* Bad luck. Get yourself a better implementation */ 2491 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", 2492 dev_id); 2493 return -EINVAL; 2494 } 2495 2496 mutex_lock(&its->dev_alloc_lock); 2497 its_dev = its_find_device(its, dev_id); 2498 if (its_dev) { 2499 /* 2500 * We already have seen this ID, probably through 2501 * another alias (PCI bridge of some sort). No need to 2502 * create the device. 
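 *
 * A concrete (purely hypothetical) example: two PCI functions behind
 * a PCIe-to-PCI bridge are both presented to the ITS with the
 * RequesterID of the bridge, so they share a single DevID and hence
 * a single ITT:
 *
 *	dev_id(03:00.0) == dev_id(03:00.1) == RID of the bridge
 *
 * Marking the device as shared below prevents its_irq_domain_free()
 * from tearing down the ITT while the other alias still has events
 * mapped on it.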
2503 */ 2504 its_dev->shared = true; 2505 pr_debug("Reusing ITT for devID %x\n", dev_id); 2506 goto out; 2507 } 2508 2509 its_dev = its_create_device(its, dev_id, nvec, true); 2510 if (!its_dev) { 2511 err = -ENOMEM; 2512 goto out; 2513 } 2514 2515 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); 2516 out: 2517 mutex_unlock(&its->dev_alloc_lock); 2518 info->scratchpad[0].ptr = its_dev; 2519 return err; 2520 } 2521 2522 static struct msi_domain_ops its_msi_domain_ops = { 2523 .msi_prepare = its_msi_prepare, 2524 }; 2525 2526 static int its_irq_gic_domain_alloc(struct irq_domain *domain, 2527 unsigned int virq, 2528 irq_hw_number_t hwirq) 2529 { 2530 struct irq_fwspec fwspec; 2531 2532 if (irq_domain_get_of_node(domain->parent)) { 2533 fwspec.fwnode = domain->parent->fwnode; 2534 fwspec.param_count = 3; 2535 fwspec.param[0] = GIC_IRQ_TYPE_LPI; 2536 fwspec.param[1] = hwirq; 2537 fwspec.param[2] = IRQ_TYPE_EDGE_RISING; 2538 } else if (is_fwnode_irqchip(domain->parent->fwnode)) { 2539 fwspec.fwnode = domain->parent->fwnode; 2540 fwspec.param_count = 2; 2541 fwspec.param[0] = hwirq; 2542 fwspec.param[1] = IRQ_TYPE_EDGE_RISING; 2543 } else { 2544 return -EINVAL; 2545 } 2546 2547 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); 2548 } 2549 2550 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 2551 unsigned int nr_irqs, void *args) 2552 { 2553 msi_alloc_info_t *info = args; 2554 struct its_device *its_dev = info->scratchpad[0].ptr; 2555 struct its_node *its = its_dev->its; 2556 irq_hw_number_t hwirq; 2557 int err; 2558 int i; 2559 2560 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); 2561 if (err) 2562 return err; 2563 2564 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev)); 2565 if (err) 2566 return err; 2567 2568 for (i = 0; i < nr_irqs; i++) { 2569 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); 2570 if (err) 2571 return err; 2572 2573 irq_domain_set_hwirq_and_chip(domain, virq + i, 2574 hwirq + i, &its_irq_chip, its_dev); 2575 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); 2576 pr_debug("ID:%d pID:%d vID:%d\n", 2577 (int)(hwirq + i - its_dev->event_map.lpi_base), 2578 (int)(hwirq + i), virq + i); 2579 } 2580 2581 return 0; 2582 } 2583 2584 static int its_irq_domain_activate(struct irq_domain *domain, 2585 struct irq_data *d, bool reserve) 2586 { 2587 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2588 u32 event = its_get_event_id(d); 2589 const struct cpumask *cpu_mask = cpu_online_mask; 2590 int cpu; 2591 2592 /* get the cpu_mask of local node */ 2593 if (its_dev->its->numa_node >= 0) 2594 cpu_mask = cpumask_of_node(its_dev->its->numa_node); 2595 2596 /* Bind the LPI to the first possible CPU */ 2597 cpu = cpumask_first_and(cpu_mask, cpu_online_mask); 2598 if (cpu >= nr_cpu_ids) { 2599 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) 2600 return -EINVAL; 2601 2602 cpu = cpumask_first(cpu_online_mask); 2603 } 2604 2605 its_dev->event_map.col_map[event] = cpu; 2606 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 2607 2608 /* Map the GIC IRQ and event to the device */ 2609 its_send_mapti(its_dev, d->hwirq, event); 2610 return 0; 2611 } 2612 2613 static void its_irq_domain_deactivate(struct irq_domain *domain, 2614 struct irq_data *d) 2615 { 2616 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2617 u32 event = its_get_event_id(d); 2618 2619 /* Stop the delivery of interrupts */ 2620 its_send_discard(its_dev, event); 2621 } 2622 2623 static void 
its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2624 unsigned int nr_irqs)
2625 {
2626 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2627 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2628 struct its_node *its = its_dev->its;
2629 int i;
2630
2631 for (i = 0; i < nr_irqs; i++) {
2632 struct irq_data *data = irq_domain_get_irq_data(domain,
2633 virq + i);
2634 u32 event = its_get_event_id(data);
2635
2636 /* Mark interrupt index as unused */
2637 clear_bit(event, its_dev->event_map.lpi_map);
2638
2639 /* Nuke the entry in the domain */
2640 irq_domain_reset_irq_data(data);
2641 }
2642
2643 mutex_lock(&its->dev_alloc_lock);
2644
2645 /*
2646 * If all interrupts have been freed, start mopping the
2647 * floor. This is conditioned on the device not being shared.
2648 */
2649 if (!its_dev->shared &&
2650 bitmap_empty(its_dev->event_map.lpi_map,
2651 its_dev->event_map.nr_lpis)) {
2652 its_lpi_free(its_dev->event_map.lpi_map,
2653 its_dev->event_map.lpi_base,
2654 its_dev->event_map.nr_lpis);
2655 kfree(its_dev->event_map.col_map);
2656
2657 /* Unmap device/itt */
2658 its_send_mapd(its_dev, 0);
2659 its_free_device(its_dev);
2660 }
2661
2662 mutex_unlock(&its->dev_alloc_lock);
2663
2664 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2665 }
2666
2667 static const struct irq_domain_ops its_domain_ops = {
2668 .alloc = its_irq_domain_alloc,
2669 .free = its_irq_domain_free,
2670 .activate = its_irq_domain_activate,
2671 .deactivate = its_irq_domain_deactivate,
2672 };
2673
2674 /*
2675 * This is insane.
2676 *
2677 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2678 * likely), the only way to perform an invalidate is to use a fake
2679 * device to issue an INV command, implying that the LPI has first
2680 * been mapped to some event on that device. Since this is not exactly
2681 * cheap, we try to keep that mapping around as long as possible, and
2682 * only issue an UNMAP if we're short on available slots.
2683 *
2684 * Broken by design(tm).
2685 */
2686 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2687 {
2688 /* Already unmapped? */
2689 if (vpe->vpe_proxy_event == -1)
2690 return;
2691
2692 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2693 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2694
2695 /*
2696 * We don't track empty slots at all, so let's move the
2697 * next_victim pointer if we can quickly reuse that slot
2698 * instead of nuking an existing entry. Not clear that this is
2699 * always a win though, and this might just generate a ripple
2700 * effect... Let's just hope VPEs don't migrate too often.
2701 */
2702 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2703 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2704
2705 vpe->vpe_proxy_event = -1;
2706 }
2707
2708 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2709 {
2710 if (!gic_rdists->has_direct_lpi) {
2711 unsigned long flags;
2712
2713 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2714 its_vpe_db_proxy_unmap_locked(vpe);
2715 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2716 }
2717 }
2718
2719 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2720 {
2721 /* Already mapped? */
2722 if (vpe->vpe_proxy_event != -1)
2723 return;
2724
2725 /* This slot was already allocated. Kick the other VPE out.
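 *
 * The proxy device only has vpe_proxy.dev->nr_ites slots, handed out
 * round-robin. What happens here is, roughly (sketch of the steps
 * performed below, not additional work):
 *
 *	slot = vpe_proxy.next_victim;
 *	if (vpe_proxy.vpes[slot])
 *		unmap that VPE's doorbell (DISCARD its proxy event);
 *	vpe_proxy.vpes[slot] = vpe;
 *	vpe->vpe_proxy_event = slot;
 *	MAPTI the doorbell LPI to (proxy device, slot);
 *	vpe_proxy.next_victim = (slot + 1) % vpe_proxy.dev->nr_ites;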
*/ 2726 if (vpe_proxy.vpes[vpe_proxy.next_victim]) 2727 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); 2728 2729 /* Map the new VPE instead */ 2730 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; 2731 vpe->vpe_proxy_event = vpe_proxy.next_victim; 2732 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; 2733 2734 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; 2735 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); 2736 } 2737 2738 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) 2739 { 2740 unsigned long flags; 2741 struct its_collection *target_col; 2742 2743 if (gic_rdists->has_direct_lpi) { 2744 void __iomem *rdbase; 2745 2746 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; 2747 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); 2748 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) 2749 cpu_relax(); 2750 2751 return; 2752 } 2753 2754 raw_spin_lock_irqsave(&vpe_proxy.lock, flags); 2755 2756 its_vpe_db_proxy_map_locked(vpe); 2757 2758 target_col = &vpe_proxy.dev->its->collections[to]; 2759 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); 2760 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; 2761 2762 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); 2763 } 2764 2765 static int its_vpe_set_affinity(struct irq_data *d, 2766 const struct cpumask *mask_val, 2767 bool force) 2768 { 2769 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2770 int cpu = cpumask_first(mask_val); 2771 2772 /* 2773 * Changing affinity is mega expensive, so let's be as lazy as 2774 * we can and only do it if we really have to. Also, if mapped 2775 * into the proxy device, we need to move the doorbell 2776 * interrupt to its new location. 2777 */ 2778 if (vpe->col_idx != cpu) { 2779 int from = vpe->col_idx; 2780 2781 vpe->col_idx = cpu; 2782 its_send_vmovp(vpe); 2783 its_vpe_db_proxy_move(vpe, from, cpu); 2784 } 2785 2786 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 2787 2788 return IRQ_SET_MASK_OK_DONE; 2789 } 2790 2791 static void its_vpe_schedule(struct its_vpe *vpe) 2792 { 2793 void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); 2794 u64 val; 2795 2796 /* Schedule the VPE */ 2797 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & 2798 GENMASK_ULL(51, 12); 2799 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; 2800 val |= GICR_VPROPBASER_RaWb; 2801 val |= GICR_VPROPBASER_InnerShareable; 2802 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); 2803 2804 val = virt_to_phys(page_address(vpe->vpt_page)) & 2805 GENMASK_ULL(51, 16); 2806 val |= GICR_VPENDBASER_RaWaWb; 2807 val |= GICR_VPENDBASER_NonShareable; 2808 /* 2809 * There is no good way of finding out if the pending table is 2810 * empty as we can race against the doorbell interrupt very 2811 * easily. So in the end, vpe->pending_last is only an 2812 * indication that the vcpu has something pending, not one 2813 * that the pending table is empty. A good implementation 2814 * would be able to read its coarse map pretty quickly anyway, 2815 * making this a tolerable issue. 2816 */ 2817 val |= GICR_VPENDBASER_PendingLast; 2818 val |= vpe->idai ? 
GICR_VPENDBASER_IDAI : 0; 2819 val |= GICR_VPENDBASER_Valid; 2820 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); 2821 } 2822 2823 static void its_vpe_deschedule(struct its_vpe *vpe) 2824 { 2825 void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); 2826 u64 val; 2827 2828 val = its_clear_vpend_valid(vlpi_base); 2829 2830 if (unlikely(val & GICR_VPENDBASER_Dirty)) { 2831 pr_err_ratelimited("ITS virtual pending table not cleaning\n"); 2832 vpe->idai = false; 2833 vpe->pending_last = true; 2834 } else { 2835 vpe->idai = !!(val & GICR_VPENDBASER_IDAI); 2836 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); 2837 } 2838 } 2839 2840 static void its_vpe_invall(struct its_vpe *vpe) 2841 { 2842 struct its_node *its; 2843 2844 list_for_each_entry(its, &its_nodes, entry) { 2845 if (!its->is_v4) 2846 continue; 2847 2848 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) 2849 continue; 2850 2851 /* 2852 * Sending a VINVALL to a single ITS is enough, as all 2853 * we need is to reach the redistributors. 2854 */ 2855 its_send_vinvall(its, vpe); 2856 return; 2857 } 2858 } 2859 2860 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) 2861 { 2862 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2863 struct its_cmd_info *info = vcpu_info; 2864 2865 switch (info->cmd_type) { 2866 case SCHEDULE_VPE: 2867 its_vpe_schedule(vpe); 2868 return 0; 2869 2870 case DESCHEDULE_VPE: 2871 its_vpe_deschedule(vpe); 2872 return 0; 2873 2874 case INVALL_VPE: 2875 its_vpe_invall(vpe); 2876 return 0; 2877 2878 default: 2879 return -EINVAL; 2880 } 2881 } 2882 2883 static void its_vpe_send_cmd(struct its_vpe *vpe, 2884 void (*cmd)(struct its_device *, u32)) 2885 { 2886 unsigned long flags; 2887 2888 raw_spin_lock_irqsave(&vpe_proxy.lock, flags); 2889 2890 its_vpe_db_proxy_map_locked(vpe); 2891 cmd(vpe_proxy.dev, vpe->vpe_proxy_event); 2892 2893 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); 2894 } 2895 2896 static void its_vpe_send_inv(struct irq_data *d) 2897 { 2898 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2899 2900 if (gic_rdists->has_direct_lpi) { 2901 void __iomem *rdbase; 2902 2903 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; 2904 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR); 2905 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) 2906 cpu_relax(); 2907 } else { 2908 its_vpe_send_cmd(vpe, its_send_inv); 2909 } 2910 } 2911 2912 static void its_vpe_mask_irq(struct irq_data *d) 2913 { 2914 /* 2915 * We need to unmask the LPI, which is described by the parent 2916 * irq_data. Instead of calling into the parent (which won't 2917 * exactly do the right thing, let's simply use the 2918 * parent_data pointer. Yes, I'm naughty. 2919 */ 2920 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); 2921 its_vpe_send_inv(d); 2922 } 2923 2924 static void its_vpe_unmask_irq(struct irq_data *d) 2925 { 2926 /* Same hack as above... 
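 *
 * In both cases the write ends up in the configuration byte of the
 * vPE doorbell LPI in the LPI property table, roughly (sketch of
 * what lpi_write_config() does on our behalf):
 *
 *	cfg = prop_table + (d->parent_data->hwirq - 8192);
 *	*cfg &= ~clr;
 *	*cfg |= set;		(set == LPI_PROP_ENABLED when unmasking)
 *
 * followed by the INV below so that the redistributor reloads it.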
*/
2927 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2928 its_vpe_send_inv(d);
2929 }
2930
2931 static int its_vpe_set_irqchip_state(struct irq_data *d,
2932 enum irqchip_irq_state which,
2933 bool state)
2934 {
2935 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2936
2937 if (which != IRQCHIP_STATE_PENDING)
2938 return -EINVAL;
2939
2940 if (gic_rdists->has_direct_lpi) {
2941 void __iomem *rdbase;
2942
2943 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2944 if (state) {
2945 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
2946 } else {
2947 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2948 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2949 cpu_relax();
2950 }
2951 } else {
2952 if (state)
2953 its_vpe_send_cmd(vpe, its_send_int);
2954 else
2955 its_vpe_send_cmd(vpe, its_send_clear);
2956 }
2957
2958 return 0;
2959 }
2960
2961 static struct irq_chip its_vpe_irq_chip = {
2962 .name = "GICv4-vpe",
2963 .irq_mask = its_vpe_mask_irq,
2964 .irq_unmask = its_vpe_unmask_irq,
2965 .irq_eoi = irq_chip_eoi_parent,
2966 .irq_set_affinity = its_vpe_set_affinity,
2967 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
2968 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
2969 };
2970
2971 static int its_vpe_id_alloc(void)
2972 {
2973 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
2974 }
2975
2976 static void its_vpe_id_free(u16 id)
2977 {
2978 ida_simple_remove(&its_vpeid_ida, id);
2979 }
2980
2981 static int its_vpe_init(struct its_vpe *vpe)
2982 {
2983 struct page *vpt_page;
2984 int vpe_id;
2985
2986 /* Allocate vpe_id */
2987 vpe_id = its_vpe_id_alloc();
2988 if (vpe_id < 0)
2989 return vpe_id;
2990
2991 /* Allocate VPT */
2992 vpt_page = its_allocate_pending_table(GFP_KERNEL);
2993 if (!vpt_page) {
2994 its_vpe_id_free(vpe_id);
2995 return -ENOMEM;
2996 }
2997
2998 if (!its_alloc_vpe_table(vpe_id)) {
2999 its_vpe_id_free(vpe_id);
3000 its_free_pending_table(vpt_page); /* free the just-allocated VPT; vpe->vpt_page isn't set yet */
3001 return -ENOMEM;
3002 }
3003
3004 vpe->vpe_id = vpe_id;
3005 vpe->vpt_page = vpt_page;
3006 vpe->vpe_proxy_event = -1;
3007
3008 return 0;
3009 }
3010
3011 static void its_vpe_teardown(struct its_vpe *vpe)
3012 {
3013 its_vpe_db_proxy_unmap(vpe);
3014 its_vpe_id_free(vpe->vpe_id);
3015 its_free_pending_table(vpe->vpt_page);
3016 }
3017
3018 static void its_vpe_irq_domain_free(struct irq_domain *domain,
3019 unsigned int virq,
3020 unsigned int nr_irqs)
3021 {
3022 struct its_vm *vm = domain->host_data;
3023 int i;
3024
3025 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3026
3027 for (i = 0; i < nr_irqs; i++) {
3028 struct irq_data *data = irq_domain_get_irq_data(domain,
3029 virq + i);
3030 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
3031
3032 BUG_ON(vm != vpe->its_vm);
3033
3034 clear_bit(data->hwirq, vm->db_bitmap);
3035 its_vpe_teardown(vpe);
3036 irq_domain_reset_irq_data(data);
3037 }
3038
3039 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
3040 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
3041 its_free_prop_table(vm->vprop_page);
3042 }
3043 }
3044
3045 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3046 unsigned int nr_irqs, void *args)
3047 {
3048 struct its_vm *vm = args;
3049 unsigned long *bitmap;
3050 struct page *vprop_page;
3051 int base, nr_ids, i, err = 0;
3052
3053 BUG_ON(!vm);
3054
3055 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
3056 if (!bitmap)
3057 return -ENOMEM;
3058
3059 if (nr_ids < nr_irqs) {
3060 its_lpi_free(bitmap,
base, nr_ids);
3061 return -ENOMEM;
3062 }
3063
3064 vprop_page = its_allocate_prop_table(GFP_KERNEL);
3065 if (!vprop_page) {
3066 its_lpi_free(bitmap, base, nr_ids);
3067 return -ENOMEM;
3068 }
3069
3070 vm->db_bitmap = bitmap;
3071 vm->db_lpi_base = base;
3072 vm->nr_db_lpis = nr_ids;
3073 vm->vprop_page = vprop_page;
3074
3075 for (i = 0; i < nr_irqs; i++) {
3076 vm->vpes[i]->vpe_db_lpi = base + i;
3077 err = its_vpe_init(vm->vpes[i]);
3078 if (err)
3079 break;
3080 err = its_irq_gic_domain_alloc(domain, virq + i,
3081 vm->vpes[i]->vpe_db_lpi);
3082 if (err)
3083 break;
3084 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
3085 &its_vpe_irq_chip, vm->vpes[i]);
3086 set_bit(i, bitmap);
3087 }
3088
3089 if (err) {
3090 if (i > 0)
3091 its_vpe_irq_domain_free(domain, virq, i); /* the first i VPEs were fully set up */
3092
3093 its_lpi_free(bitmap, base, nr_ids);
3094 its_free_prop_table(vprop_page);
3095 }
3096
3097 return err;
3098 }
3099
3100 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
3101 struct irq_data *d, bool reserve)
3102 {
3103 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3104 struct its_node *its;
3105
3106 /* If we use the list map, we issue VMAPP on demand... */
3107 if (its_list_map)
3108 return 0;
3109
3110 /* Map the VPE to the first possible CPU */
3111 vpe->col_idx = cpumask_first(cpu_online_mask);
3112
3113 list_for_each_entry(its, &its_nodes, entry) {
3114 if (!its->is_v4)
3115 continue;
3116
3117 its_send_vmapp(its, vpe, true);
3118 its_send_vinvall(its, vpe);
3119 }
3120
3121 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
3122
3123 return 0;
3124 }
3125
3126 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
3127 struct irq_data *d)
3128 {
3129 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3130 struct its_node *its;
3131
3132 /*
3133 * If we use the list map, we unmap the VPE once no VLPIs are
3134 * associated with the VM.
3135 */
3136 if (its_list_map)
3137 return;
3138
3139 list_for_each_entry(its, &its_nodes, entry) {
3140 if (!its->is_v4)
3141 continue;
3142
3143 its_send_vmapp(its, vpe, false);
3144 }
3145 }
3146
3147 static const struct irq_domain_ops its_vpe_domain_ops = {
3148 .alloc = its_vpe_irq_domain_alloc,
3149 .free = its_vpe_irq_domain_free,
3150 .activate = its_vpe_irq_domain_activate,
3151 .deactivate = its_vpe_irq_domain_deactivate,
3152 };
3153
3154 static int its_force_quiescent(void __iomem *base)
3155 {
3156 u32 count = 1000000; /* 1s */
3157 u32 val;
3158
3159 val = readl_relaxed(base + GITS_CTLR);
3160 /*
3161 * GIC architecture specification requires the ITS to be both
3162 * disabled and quiescent for writes to GITS_BASER<n> or
3163 * GITS_CBASER to not have UNPREDICTABLE results.
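 *
 * The sequence used below is therefore, in outline:
 *
 *	1. if GITS_CTLR already reports Quiescent with Enabled clear,
 *	   there is nothing to do;
 *	2. clear GITS_CTLR.Enabled (and ImDe) so no new commands or
 *	   translations are started;
 *	3. poll GITS_CTLR.Quiescent for up to ~1s (1000000 x udelay(1))
 *	   and give up with -EBUSY if it never becomes set.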
3164 */ 3165 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) 3166 return 0; 3167 3168 /* Disable the generation of all interrupts to this ITS */ 3169 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); 3170 writel_relaxed(val, base + GITS_CTLR); 3171 3172 /* Poll GITS_CTLR and wait until ITS becomes quiescent */ 3173 while (1) { 3174 val = readl_relaxed(base + GITS_CTLR); 3175 if (val & GITS_CTLR_QUIESCENT) 3176 return 0; 3177 3178 count--; 3179 if (!count) 3180 return -EBUSY; 3181 3182 cpu_relax(); 3183 udelay(1); 3184 } 3185 } 3186 3187 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) 3188 { 3189 struct its_node *its = data; 3190 3191 /* erratum 22375: only alloc 8MB table size */ 3192 its->device_ids = 0x14; /* 20 bits, 8MB */ 3193 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; 3194 3195 return true; 3196 } 3197 3198 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) 3199 { 3200 struct its_node *its = data; 3201 3202 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; 3203 3204 return true; 3205 } 3206 3207 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) 3208 { 3209 struct its_node *its = data; 3210 3211 /* On QDF2400, the size of the ITE is 16Bytes */ 3212 its->ite_size = 16; 3213 3214 return true; 3215 } 3216 3217 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) 3218 { 3219 struct its_node *its = its_dev->its; 3220 3221 /* 3222 * The Socionext Synquacer SoC has a so-called 'pre-ITS', 3223 * which maps 32-bit writes targeted at a separate window of 3224 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER 3225 * with device ID taken from bits [device_id_bits + 1:2] of 3226 * the window offset. 3227 */ 3228 return its->pre_its_base + (its_dev->device_id << 2); 3229 } 3230 3231 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) 3232 { 3233 struct its_node *its = data; 3234 u32 pre_its_window[2]; 3235 u32 ids; 3236 3237 if (!fwnode_property_read_u32_array(its->fwnode_handle, 3238 "socionext,synquacer-pre-its", 3239 pre_its_window, 3240 ARRAY_SIZE(pre_its_window))) { 3241 3242 its->pre_its_base = pre_its_window[0]; 3243 its->get_msi_base = its_irq_get_msi_base_pre_its; 3244 3245 ids = ilog2(pre_its_window[1]) - 2; 3246 if (its->device_ids > ids) 3247 its->device_ids = ids; 3248 3249 /* the pre-ITS breaks isolation, so disable MSI remapping */ 3250 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; 3251 return true; 3252 } 3253 return false; 3254 } 3255 3256 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) 3257 { 3258 struct its_node *its = data; 3259 3260 /* 3261 * Hip07 insists on using the wrong address for the VLPI 3262 * page. Trick it into doing the right thing... 
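 *
 * Concretely, whenever a GICv4 command needs a target redistributor
 * address (VMAPP/VMOVP), the command builders earlier in this file
 * compute something along the lines of (sketch, not literal code):
 *
 *	target = its->collections[vpe->col_idx].target_address
 *			+ its->vlpi_redist_offset;
 *
 * so setting the offset to 128K below makes this ITS aim at the VLPI
 * frame (RD_base + SZ_128K) instead of RD_base itself.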
3263 */ 3264 its->vlpi_redist_offset = SZ_128K; 3265 return true; 3266 } 3267 3268 static const struct gic_quirk its_quirks[] = { 3269 #ifdef CONFIG_CAVIUM_ERRATUM_22375 3270 { 3271 .desc = "ITS: Cavium errata 22375, 24313", 3272 .iidr = 0xa100034c, /* ThunderX pass 1.x */ 3273 .mask = 0xffff0fff, 3274 .init = its_enable_quirk_cavium_22375, 3275 }, 3276 #endif 3277 #ifdef CONFIG_CAVIUM_ERRATUM_23144 3278 { 3279 .desc = "ITS: Cavium erratum 23144", 3280 .iidr = 0xa100034c, /* ThunderX pass 1.x */ 3281 .mask = 0xffff0fff, 3282 .init = its_enable_quirk_cavium_23144, 3283 }, 3284 #endif 3285 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 3286 { 3287 .desc = "ITS: QDF2400 erratum 0065", 3288 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ 3289 .mask = 0xffffffff, 3290 .init = its_enable_quirk_qdf2400_e0065, 3291 }, 3292 #endif 3293 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS 3294 { 3295 /* 3296 * The Socionext Synquacer SoC incorporates ARM's own GIC-500 3297 * implementation, but with a 'pre-ITS' added that requires 3298 * special handling in software. 3299 */ 3300 .desc = "ITS: Socionext Synquacer pre-ITS", 3301 .iidr = 0x0001143b, 3302 .mask = 0xffffffff, 3303 .init = its_enable_quirk_socionext_synquacer, 3304 }, 3305 #endif 3306 #ifdef CONFIG_HISILICON_ERRATUM_161600802 3307 { 3308 .desc = "ITS: Hip07 erratum 161600802", 3309 .iidr = 0x00000004, 3310 .mask = 0xffffffff, 3311 .init = its_enable_quirk_hip07_161600802, 3312 }, 3313 #endif 3314 { 3315 } 3316 }; 3317 3318 static void its_enable_quirks(struct its_node *its) 3319 { 3320 u32 iidr = readl_relaxed(its->base + GITS_IIDR); 3321 3322 gic_enable_quirks(iidr, its_quirks, its); 3323 } 3324 3325 static int its_save_disable(void) 3326 { 3327 struct its_node *its; 3328 int err = 0; 3329 3330 raw_spin_lock(&its_lock); 3331 list_for_each_entry(its, &its_nodes, entry) { 3332 void __iomem *base; 3333 3334 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) 3335 continue; 3336 3337 base = its->base; 3338 its->ctlr_save = readl_relaxed(base + GITS_CTLR); 3339 err = its_force_quiescent(base); 3340 if (err) { 3341 pr_err("ITS@%pa: failed to quiesce: %d\n", 3342 &its->phys_base, err); 3343 writel_relaxed(its->ctlr_save, base + GITS_CTLR); 3344 goto err; 3345 } 3346 3347 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); 3348 } 3349 3350 err: 3351 if (err) { 3352 list_for_each_entry_continue_reverse(its, &its_nodes, entry) { 3353 void __iomem *base; 3354 3355 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) 3356 continue; 3357 3358 base = its->base; 3359 writel_relaxed(its->ctlr_save, base + GITS_CTLR); 3360 } 3361 } 3362 raw_spin_unlock(&its_lock); 3363 3364 return err; 3365 } 3366 3367 static void its_restore_enable(void) 3368 { 3369 struct its_node *its; 3370 int ret; 3371 3372 raw_spin_lock(&its_lock); 3373 list_for_each_entry(its, &its_nodes, entry) { 3374 void __iomem *base; 3375 int i; 3376 3377 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) 3378 continue; 3379 3380 base = its->base; 3381 3382 /* 3383 * Make sure that the ITS is disabled. If it fails to quiesce, 3384 * don't restore it since writing to CBASER or BASER<n> 3385 * registers is undefined according to the GIC v3 ITS 3386 * Specification. 3387 */ 3388 ret = its_force_quiescent(base); 3389 if (ret) { 3390 pr_err("ITS@%pa: failed to quiesce on resume: %d\n", 3391 &its->phys_base, ret); 3392 continue; 3393 } 3394 3395 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); 3396 3397 /* 3398 * Writing CBASER resets CREADR to 0, so make CWRITER and 3399 * cmd_write line up with it. 
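 *
 * CREADR and CWRITER hold byte offsets into the command queue, while
 * cmd_write is the software copy of the write pointer; they only stay
 * consistent if all of them restart from the same place. The invariant
 * being restored below is simply:
 *
 *	cmd_write == cmd_base  <==>  GITS_CWRITER == 0 == GITS_CREADR
 *
 * with (cmd_write - cmd_base) * sizeof(struct its_cmd_block) being
 * what eventually gets written to GITS_CWRITER as commands are queued.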
3400 */ 3401 its->cmd_write = its->cmd_base; 3402 gits_write_cwriter(0, base + GITS_CWRITER); 3403 3404 /* Restore GITS_BASER from the value cache. */ 3405 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 3406 struct its_baser *baser = &its->tables[i]; 3407 3408 if (!(baser->val & GITS_BASER_VALID)) 3409 continue; 3410 3411 its_write_baser(its, baser, baser->val); 3412 } 3413 writel_relaxed(its->ctlr_save, base + GITS_CTLR); 3414 3415 /* 3416 * Reinit the collection if it's stored in the ITS. This is 3417 * indicated by the col_id being less than the HCC field. 3418 * CID < HCC as specified in the GIC v3 Documentation. 3419 */ 3420 if (its->collections[smp_processor_id()].col_id < 3421 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) 3422 its_cpu_init_collection(its); 3423 } 3424 raw_spin_unlock(&its_lock); 3425 } 3426 3427 static struct syscore_ops its_syscore_ops = { 3428 .suspend = its_save_disable, 3429 .resume = its_restore_enable, 3430 }; 3431 3432 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) 3433 { 3434 struct irq_domain *inner_domain; 3435 struct msi_domain_info *info; 3436 3437 info = kzalloc(sizeof(*info), GFP_KERNEL); 3438 if (!info) 3439 return -ENOMEM; 3440 3441 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); 3442 if (!inner_domain) { 3443 kfree(info); 3444 return -ENOMEM; 3445 } 3446 3447 inner_domain->parent = its_parent; 3448 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); 3449 inner_domain->flags |= its->msi_domain_flags; 3450 info->ops = &its_msi_domain_ops; 3451 info->data = its; 3452 inner_domain->host_data = info; 3453 3454 return 0; 3455 } 3456 3457 static int its_init_vpe_domain(void) 3458 { 3459 struct its_node *its; 3460 u32 devid; 3461 int entries; 3462 3463 if (gic_rdists->has_direct_lpi) { 3464 pr_info("ITS: Using DirectLPI for VPE invalidation\n"); 3465 return 0; 3466 } 3467 3468 /* Any ITS will do, even if not v4 */ 3469 its = list_first_entry(&its_nodes, struct its_node, entry); 3470 3471 entries = roundup_pow_of_two(nr_cpu_ids); 3472 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), 3473 GFP_KERNEL); 3474 if (!vpe_proxy.vpes) { 3475 pr_err("ITS: Can't allocate GICv4 proxy device array\n"); 3476 return -ENOMEM; 3477 } 3478 3479 /* Use the last possible DevID */ 3480 devid = GENMASK(its->device_ids - 1, 0); 3481 vpe_proxy.dev = its_create_device(its, devid, entries, false); 3482 if (!vpe_proxy.dev) { 3483 kfree(vpe_proxy.vpes); 3484 pr_err("ITS: Can't allocate GICv4 proxy device\n"); 3485 return -ENOMEM; 3486 } 3487 3488 BUG_ON(entries > vpe_proxy.dev->nr_ites); 3489 3490 raw_spin_lock_init(&vpe_proxy.lock); 3491 vpe_proxy.next_victim = 0; 3492 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", 3493 devid, vpe_proxy.dev->nr_ites); 3494 3495 return 0; 3496 } 3497 3498 static int __init its_compute_its_list_map(struct resource *res, 3499 void __iomem *its_base) 3500 { 3501 int its_number; 3502 u32 ctlr; 3503 3504 /* 3505 * This is assumed to be done early enough that we're 3506 * guaranteed to be single-threaded, hence no 3507 * locking. Should this change, we should address 3508 * this. 
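 *
 * Background for the ITSList dance below: a v4 ITS that cannot do a
 * global VMOVP is assigned one of the GICv4_ITS_LIST_MAX (16) slots
 * tracked in its_list_map, and that slot number is programmed into
 * GITS_CTLR.ITS_Number. The value is read back because the field may
 * not be fully implemented; if the readback disagrees, we simply
 * adopt whatever number the hardware retained. The slot later gates
 * per-ITS VPE work, e.g. (sketch):
 *
 *	if (!vpe->its_vm->vlpi_count[its->list_nr])
 *		skip sending VMOVP/VINVALL to this ITS;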
3509 */ 3510 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); 3511 if (its_number >= GICv4_ITS_LIST_MAX) { 3512 pr_err("ITS@%pa: No ITSList entry available!\n", 3513 &res->start); 3514 return -EINVAL; 3515 } 3516 3517 ctlr = readl_relaxed(its_base + GITS_CTLR); 3518 ctlr &= ~GITS_CTLR_ITS_NUMBER; 3519 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; 3520 writel_relaxed(ctlr, its_base + GITS_CTLR); 3521 ctlr = readl_relaxed(its_base + GITS_CTLR); 3522 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { 3523 its_number = ctlr & GITS_CTLR_ITS_NUMBER; 3524 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; 3525 } 3526 3527 if (test_and_set_bit(its_number, &its_list_map)) { 3528 pr_err("ITS@%pa: Duplicate ITSList entry %d\n", 3529 &res->start, its_number); 3530 return -EINVAL; 3531 } 3532 3533 return its_number; 3534 } 3535 3536 static int __init its_probe_one(struct resource *res, 3537 struct fwnode_handle *handle, int numa_node) 3538 { 3539 struct its_node *its; 3540 void __iomem *its_base; 3541 u32 val, ctlr; 3542 u64 baser, tmp, typer; 3543 struct page *page; 3544 int err; 3545 3546 its_base = ioremap(res->start, resource_size(res)); 3547 if (!its_base) { 3548 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); 3549 return -ENOMEM; 3550 } 3551 3552 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; 3553 if (val != 0x30 && val != 0x40) { 3554 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); 3555 err = -ENODEV; 3556 goto out_unmap; 3557 } 3558 3559 err = its_force_quiescent(its_base); 3560 if (err) { 3561 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); 3562 goto out_unmap; 3563 } 3564 3565 pr_info("ITS %pR\n", res); 3566 3567 its = kzalloc(sizeof(*its), GFP_KERNEL); 3568 if (!its) { 3569 err = -ENOMEM; 3570 goto out_unmap; 3571 } 3572 3573 raw_spin_lock_init(&its->lock); 3574 mutex_init(&its->dev_alloc_lock); 3575 INIT_LIST_HEAD(&its->entry); 3576 INIT_LIST_HEAD(&its->its_device_list); 3577 typer = gic_read_typer(its_base + GITS_TYPER); 3578 its->base = its_base; 3579 its->phys_base = res->start; 3580 its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); 3581 its->device_ids = GITS_TYPER_DEVBITS(typer); 3582 its->is_v4 = !!(typer & GITS_TYPER_VLPIS); 3583 if (its->is_v4) { 3584 if (!(typer & GITS_TYPER_VMOVP)) { 3585 err = its_compute_its_list_map(res, its_base); 3586 if (err < 0) 3587 goto out_free_its; 3588 3589 its->list_nr = err; 3590 3591 pr_info("ITS@%pa: Using ITS number %d\n", 3592 &res->start, err); 3593 } else { 3594 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); 3595 } 3596 } 3597 3598 its->numa_node = numa_node; 3599 3600 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, 3601 get_order(ITS_CMD_QUEUE_SZ)); 3602 if (!page) { 3603 err = -ENOMEM; 3604 goto out_free_its; 3605 } 3606 its->cmd_base = (void *)page_address(page); 3607 its->cmd_write = its->cmd_base; 3608 its->fwnode_handle = handle; 3609 its->get_msi_base = its_irq_get_msi_base; 3610 its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; 3611 3612 its_enable_quirks(its); 3613 3614 err = its_alloc_tables(its); 3615 if (err) 3616 goto out_free_cmd; 3617 3618 err = its_alloc_collections(its); 3619 if (err) 3620 goto out_free_tables; 3621 3622 baser = (virt_to_phys(its->cmd_base) | 3623 GITS_CBASER_RaWaWb | 3624 GITS_CBASER_InnerShareable | 3625 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | 3626 GITS_CBASER_VALID); 3627 3628 gits_write_cbaser(baser, its->base + GITS_CBASER); 3629 tmp = gits_read_cbaser(its->base + 
GITS_CBASER); 3630 3631 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { 3632 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { 3633 /* 3634 * The HW reports non-shareable, we must 3635 * remove the cacheability attributes as 3636 * well. 3637 */ 3638 baser &= ~(GITS_CBASER_SHAREABILITY_MASK | 3639 GITS_CBASER_CACHEABILITY_MASK); 3640 baser |= GITS_CBASER_nC; 3641 gits_write_cbaser(baser, its->base + GITS_CBASER); 3642 } 3643 pr_info("ITS: using cache flushing for cmd queue\n"); 3644 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; 3645 } 3646 3647 gits_write_cwriter(0, its->base + GITS_CWRITER); 3648 ctlr = readl_relaxed(its->base + GITS_CTLR); 3649 ctlr |= GITS_CTLR_ENABLE; 3650 if (its->is_v4) 3651 ctlr |= GITS_CTLR_ImDe; 3652 writel_relaxed(ctlr, its->base + GITS_CTLR); 3653 3654 if (GITS_TYPER_HCC(typer)) 3655 its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE; 3656 3657 err = its_init_domain(handle, its); 3658 if (err) 3659 goto out_free_tables; 3660 3661 raw_spin_lock(&its_lock); 3662 list_add(&its->entry, &its_nodes); 3663 raw_spin_unlock(&its_lock); 3664 3665 return 0; 3666 3667 out_free_tables: 3668 its_free_tables(its); 3669 out_free_cmd: 3670 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); 3671 out_free_its: 3672 kfree(its); 3673 out_unmap: 3674 iounmap(its_base); 3675 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); 3676 return err; 3677 } 3678 3679 static bool gic_rdists_supports_plpis(void) 3680 { 3681 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); 3682 } 3683 3684 static int redist_disable_lpis(void) 3685 { 3686 void __iomem *rbase = gic_data_rdist_rd_base(); 3687 u64 timeout = USEC_PER_SEC; 3688 u64 val; 3689 3690 if (!gic_rdists_supports_plpis()) { 3691 pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); 3692 return -ENXIO; 3693 } 3694 3695 val = readl_relaxed(rbase + GICR_CTLR); 3696 if (!(val & GICR_CTLR_ENABLE_LPIS)) 3697 return 0; 3698 3699 /* 3700 * If coming via a CPU hotplug event, we don't need to disable 3701 * LPIs before trying to re-enable them. They are already 3702 * configured and all is well in the world. 3703 * 3704 * If running with preallocated tables, there is nothing to do. 3705 */ 3706 if (gic_data_rdist()->lpi_enabled || 3707 (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) 3708 return 0; 3709 3710 /* 3711 * From that point on, we only try to do some damage control. 3712 */ 3713 pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", 3714 smp_processor_id()); 3715 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); 3716 3717 /* Disable LPIs */ 3718 val &= ~GICR_CTLR_ENABLE_LPIS; 3719 writel_relaxed(val, rbase + GICR_CTLR); 3720 3721 /* Make sure any change to GICR_CTLR is observable by the GIC */ 3722 dsb(sy); 3723 3724 /* 3725 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs 3726 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers. 3727 * Error out if we time out waiting for RWP to clear. 3728 */ 3729 while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) { 3730 if (!timeout) { 3731 pr_err("CPU%d: Timeout while disabling LPIs\n", 3732 smp_processor_id()); 3733 return -ETIMEDOUT; 3734 } 3735 udelay(1); 3736 timeout--; 3737 } 3738 3739 /* 3740 * After it has been written to 1, it is IMPLEMENTATION 3741 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be 3742 * cleared to 0. Error out if clearing the bit failed. 
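 *
 * Putting this function together, the damage-control path for a CPU
 * that was booted with LPIs already enabled (and no usable tables
 * inherited from firmware) is roughly:
 *
 *	1. warn and taint, since LPI memory may already be corrupted;
 *	2. clear GICR_CTLR.EnableLPIs and dsb();
 *	3. wait for GICR_CTLR.RWP to clear, bounded by ~1s of udelay();
 *	4. verify that EnableLPIs really went back to 0; if it turned
 *	   out to be RES1 on this implementation, fail with -EBUSY.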
3743 */ 3744 if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) { 3745 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id()); 3746 return -EBUSY; 3747 } 3748 3749 return 0; 3750 } 3751 3752 int its_cpu_init(void) 3753 { 3754 if (!list_empty(&its_nodes)) { 3755 int ret; 3756 3757 ret = redist_disable_lpis(); 3758 if (ret) 3759 return ret; 3760 3761 its_cpu_init_lpis(); 3762 its_cpu_init_collections(); 3763 } 3764 3765 return 0; 3766 } 3767 3768 static const struct of_device_id its_device_id[] = { 3769 { .compatible = "arm,gic-v3-its", }, 3770 {}, 3771 }; 3772 3773 static int __init its_of_probe(struct device_node *node) 3774 { 3775 struct device_node *np; 3776 struct resource res; 3777 3778 for (np = of_find_matching_node(node, its_device_id); np; 3779 np = of_find_matching_node(np, its_device_id)) { 3780 if (!of_device_is_available(np)) 3781 continue; 3782 if (!of_property_read_bool(np, "msi-controller")) { 3783 pr_warn("%pOF: no msi-controller property, ITS ignored\n", 3784 np); 3785 continue; 3786 } 3787 3788 if (of_address_to_resource(np, 0, &res)) { 3789 pr_warn("%pOF: no regs?\n", np); 3790 continue; 3791 } 3792 3793 its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); 3794 } 3795 return 0; 3796 } 3797 3798 #ifdef CONFIG_ACPI 3799 3800 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) 3801 3802 #ifdef CONFIG_ACPI_NUMA 3803 struct its_srat_map { 3804 /* numa node id */ 3805 u32 numa_node; 3806 /* GIC ITS ID */ 3807 u32 its_id; 3808 }; 3809 3810 static struct its_srat_map *its_srat_maps __initdata; 3811 static int its_in_srat __initdata; 3812 3813 static int __init acpi_get_its_numa_node(u32 its_id) 3814 { 3815 int i; 3816 3817 for (i = 0; i < its_in_srat; i++) { 3818 if (its_id == its_srat_maps[i].its_id) 3819 return its_srat_maps[i].numa_node; 3820 } 3821 return NUMA_NO_NODE; 3822 } 3823 3824 static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header, 3825 const unsigned long end) 3826 { 3827 return 0; 3828 } 3829 3830 static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header, 3831 const unsigned long end) 3832 { 3833 int node; 3834 struct acpi_srat_gic_its_affinity *its_affinity; 3835 3836 its_affinity = (struct acpi_srat_gic_its_affinity *)header; 3837 if (!its_affinity) 3838 return -EINVAL; 3839 3840 if (its_affinity->header.length < sizeof(*its_affinity)) { 3841 pr_err("SRAT: Invalid header length %d in ITS affinity\n", 3842 its_affinity->header.length); 3843 return -EINVAL; 3844 } 3845 3846 node = acpi_map_pxm_to_node(its_affinity->proximity_domain); 3847 3848 if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { 3849 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); 3850 return 0; 3851 } 3852 3853 its_srat_maps[its_in_srat].numa_node = node; 3854 its_srat_maps[its_in_srat].its_id = its_affinity->its_id; 3855 its_in_srat++; 3856 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", 3857 its_affinity->proximity_domain, its_affinity->its_id, node); 3858 3859 return 0; 3860 } 3861 3862 static void __init acpi_table_parse_srat_its(void) 3863 { 3864 int count; 3865 3866 count = acpi_table_parse_entries(ACPI_SIG_SRAT, 3867 sizeof(struct acpi_table_srat), 3868 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, 3869 gic_acpi_match_srat_its, 0); 3870 if (count <= 0) 3871 return; 3872 3873 its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), 3874 GFP_KERNEL); 3875 if (!its_srat_maps) { 3876 pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n"); 3877 return; 3878 } 3879 3880 acpi_table_parse_entries(ACPI_SIG_SRAT, 3881 sizeof(struct 
acpi_table_srat), 3882 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, 3883 gic_acpi_parse_srat_its, 0); 3884 } 3885 3886 /* free the its_srat_maps after ITS probing */ 3887 static void __init acpi_its_srat_maps_free(void) 3888 { 3889 kfree(its_srat_maps); 3890 } 3891 #else 3892 static void __init acpi_table_parse_srat_its(void) { } 3893 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } 3894 static void __init acpi_its_srat_maps_free(void) { } 3895 #endif 3896 3897 static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, 3898 const unsigned long end) 3899 { 3900 struct acpi_madt_generic_translator *its_entry; 3901 struct fwnode_handle *dom_handle; 3902 struct resource res; 3903 int err; 3904 3905 its_entry = (struct acpi_madt_generic_translator *)header; 3906 memset(&res, 0, sizeof(res)); 3907 res.start = its_entry->base_address; 3908 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; 3909 res.flags = IORESOURCE_MEM; 3910 3911 dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address); 3912 if (!dom_handle) { 3913 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n", 3914 &res.start); 3915 return -ENOMEM; 3916 } 3917 3918 err = iort_register_domain_token(its_entry->translation_id, res.start, 3919 dom_handle); 3920 if (err) { 3921 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n", 3922 &res.start, its_entry->translation_id); 3923 goto dom_err; 3924 } 3925 3926 err = its_probe_one(&res, dom_handle, 3927 acpi_get_its_numa_node(its_entry->translation_id)); 3928 if (!err) 3929 return 0; 3930 3931 iort_deregister_domain_token(its_entry->translation_id); 3932 dom_err: 3933 irq_domain_free_fwnode(dom_handle); 3934 return err; 3935 } 3936 3937 static void __init its_acpi_probe(void) 3938 { 3939 acpi_table_parse_srat_its(); 3940 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, 3941 gic_acpi_parse_madt_its, 0); 3942 acpi_its_srat_maps_free(); 3943 } 3944 #else 3945 static void __init its_acpi_probe(void) { } 3946 #endif 3947 3948 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, 3949 struct irq_domain *parent_domain) 3950 { 3951 struct device_node *of_node; 3952 struct its_node *its; 3953 bool has_v4 = false; 3954 int err; 3955 3956 its_parent = parent_domain; 3957 of_node = to_of_node(handle); 3958 if (of_node) 3959 its_of_probe(of_node); 3960 else 3961 its_acpi_probe(); 3962 3963 if (list_empty(&its_nodes)) { 3964 pr_warn("ITS: No ITS available, not enabling LPIs\n"); 3965 return -ENXIO; 3966 } 3967 3968 gic_rdists = rdists; 3969 3970 err = allocate_lpi_tables(); 3971 if (err) 3972 return err; 3973 3974 list_for_each_entry(its, &its_nodes, entry) 3975 has_v4 |= its->is_v4; 3976 3977 if (has_v4 & rdists->has_vlpis) { 3978 if (its_init_vpe_domain() || 3979 its_init_v4(parent_domain, &its_vpe_domain_ops)) { 3980 rdists->has_vlpis = false; 3981 pr_err("ITS: Disabling GICv4 support\n"); 3982 } 3983 } 3984 3985 register_syscore_ops(&its_syscore_ops); 3986 3987 return 0; 3988 } 3989
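
/*
 * Boot-time flow in summary (the functions above remain the
 * authoritative description):
 *
 *	its_init()
 *	  -> its_of_probe() or its_acpi_probe()	probe each ITS frame
 *	  -> allocate_lpi_tables()		LPI property/pending tables
 *	  -> its_init_vpe_domain() + its_init_v4()	only with GICv4 VLPIs
 *	  -> register_syscore_ops()		suspend/resume hooks
 *
 *	its_cpu_init()				per CPU, as it comes online
 *	  -> redist_disable_lpis()
 *	  -> its_cpu_init_lpis()
 *	  -> its_cpu_init_collections()
 */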