/**
 * imr.c -- Intel Isolated Memory Region driver
 *
 * Copyright(c) 2013 Intel Corporation.
 * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
 *
 * IMR registers define an isolated region of memory that can
 * be masked to prohibit certain system agents from accessing memory.
 * When a device behind a masked port performs an access - snooped or
 * not - an IMR may optionally prevent that transaction from changing
 * the state of memory or from getting correct data in response to the
 * operation.
 *
 * Write data will be dropped and reads will return 0xFFFFFFFF; in
 * addition the system will reset and the system BIOS will print an
 * error message to inform the user that an IMR has been violated.
 *
 * This code is based on the Linux MTRR code and reference code from
 * Intel's Quark BSP EFI, Linux and grub code.
 *
 * See quark-x1000-datasheet.pdf for register definitions.
 * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm-generic/sections.h>
#include <asm/cpu_device_id.h>
#include <asm/imr.h>
#include <asm/iosf_mbi.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>

struct imr_device {
	struct dentry	*file;
	bool		init;
	struct mutex	lock;
	int		max_imr;
	int		reg_base;
};

static struct imr_device imr_dev;

/*
 * IMR read/write mask control registers.
 * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for
 * bit definitions.
 *
 * addr_lo
 *	31	Lock bit
 *	30:24	Reserved
 *	23:2	1 KiB aligned lo address
 *	1:0	Reserved
 *
 * addr_hi
 *	31:24	Reserved
 *	23:2	1 KiB aligned hi address
 *	1:0	Reserved
 */
#define IMR_LOCK	BIT(31)

struct imr_regs {
	u32 addr_lo;
	u32 addr_hi;
	u32 rmask;
	u32 wmask;
};

#define IMR_NUM_REGS	(sizeof(struct imr_regs)/sizeof(u32))
#define IMR_SHIFT	8
#define imr_to_phys(x)	((x) << IMR_SHIFT)
#define phys_to_imr(x)	((x) >> IMR_SHIFT)

/**
 * imr_is_enabled - true if an IMR is enabled false otherwise.
 *
 * Determines if an IMR is enabled based on address range and read/write
 * mask. An IMR set with an address range set to zero and a read/write
 * access mask set to all is considered to be disabled. An IMR in any
 * other state - for example with its address range set to zero but
 * without full read/write access - is considered to be enabled. This
 * definition of disabled is how firmware switches off an IMR and is
 * maintained in kernel for consistency.
 *
 * @imr:	pointer to IMR descriptor.
 * @return:	true if IMR enabled false if disabled.
 */
static inline int imr_is_enabled(struct imr_regs *imr)
{
	return !(imr->rmask == IMR_READ_ACCESS_ALL &&
		 imr->wmask == IMR_WRITE_ACCESS_ALL &&
		 imr_to_phys(imr->addr_lo) == 0 &&
		 imr_to_phys(imr->addr_hi) == 0);
}

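/*
 * Worked example of the encoding above (an illustrative address, not taken
 * from real firmware). A 1 KiB aligned physical address such as 0x00040000
 * has its low ten bits clear. phys_to_imr() shifts it right by IMR_SHIFT (8)
 * to give 0x00000400, placing the address in bits 23:2 of addr_lo/addr_hi
 * per the layout above - the shift is 8 rather than 10 because the address
 * field starts at register bit 2 rather than bit 0. Hardware treats addr_hi
 * as covering a further IMR_MASK (IMR_ALIGN - 1 = 0x3ff) bytes, so an IMR
 * programmed with addr_lo == addr_hi == phys_to_imr(0x00040000) protects
 * the single 1 KiB block 0x00040000-0x000403ff.
 */
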
/**
 * imr_read - read an IMR at a given index.
 *
 * Requires caller to hold imr mutex.
 *
 * @idev:	pointer to imr_device structure.
 * @imr_id:	IMR entry to read.
 * @imr:	IMR structure representing address and access masks.
 * @return:	0 on success or error code passed from iosf_mbi on failure.
 */
static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
{
	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
	int ret;

	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_lo);
	if (ret)
		return ret;

	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_hi);
	if (ret)
		return ret;

	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->rmask);
	if (ret)
		return ret;

	return iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg, &imr->wmask);
}

/**
 * imr_write - write an IMR at a given index.
 *
 * Requires caller to hold imr mutex.
 * Note lock bits need to be written independently of address bits.
 *
 * @idev:	pointer to imr_device structure.
 * @imr_id:	IMR entry to write.
 * @imr:	IMR structure representing address and access masks.
 * @return:	0 on success or error code passed from iosf_mbi on failure.
 */
static int imr_write(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
{
	unsigned long flags;
	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
	int ret;

	local_irq_save(flags);

	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_lo);
	if (ret)
		goto failed;

	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_hi);
	if (ret)
		goto failed;

	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->rmask);
	if (ret)
		goto failed;

	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg, imr->wmask);
	if (ret)
		goto failed;

	local_irq_restore(flags);
	return 0;
failed:
	/*
	 * If writing to the IOSF failed then we're in an unknown state,
	 * likely a very bad state. An IMR in an invalid state will almost
	 * certainly lead to a memory access violation.
	 */
	local_irq_restore(flags);
	WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n",
	     imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK);

	return ret;
}

/**
 * imr_dbgfs_state_show - print state of IMR registers.
 *
 * @s:		pointer to seq_file for output.
 * @unused:	unused parameter.
 * @return:	0 on success or error code passed from iosf_mbi on failure.
 */
static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
{
	phys_addr_t base;
	phys_addr_t end;
	int i;
	struct imr_device *idev = s->private;
	struct imr_regs imr;
	size_t size;
	int ret = -ENODEV;

	mutex_lock(&idev->lock);

	for (i = 0; i < idev->max_imr; i++) {

		ret = imr_read(idev, i, &imr);
		if (ret)
			break;

		/*
		 * Remember to add IMR_ALIGN bytes to size to indicate the
		 * inherent IMR_ALIGN size bytes contained in the masked away
		 * lower ten bits.
		 */
		if (imr_is_enabled(&imr)) {
			base = imr_to_phys(imr.addr_lo);
			end = imr_to_phys(imr.addr_hi) + IMR_MASK;
			size = end - base + 1;
		} else {
			base = 0;
			end = 0;
			size = 0;
		}
		seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
			   "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
			   &base, &end, size, imr.rmask, imr.wmask,
			   imr_is_enabled(&imr) ? "enabled " : "disabled",
			   imr.addr_lo & IMR_LOCK ? "locked" : "unlocked");
	}

	mutex_unlock(&idev->lock);
	return ret;
}

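/*
 * Purely illustrative sample of the output format produced above - the
 * addresses, masks and lock states are hypothetical and depend entirely on
 * what firmware has programmed:
 *
 *	imr02: base=0x00100000, end=0x001503ff, size=0x00050400 rmask=0x00000002, wmask=0x00000002, enabled , locked
 *	imr03: base=0x00000000, end=0x00000000, size=0x00000000 rmask=0xbfffffff, wmask=0xffffffff, disabled, unlocked
 */
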
/**
 * imr_state_open - debugfs open callback.
 *
 * @inode:	pointer to struct inode.
 * @file:	pointer to struct file.
 * @return:	result of single_open().
 */
static int imr_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, imr_dbgfs_state_show, inode->i_private);
}

static const struct file_operations imr_state_ops = {
	.open		= imr_state_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/**
 * imr_debugfs_register - register debugfs hooks.
 *
 * @idev:	pointer to imr_device structure.
 * @return:	0 on success - errno on failure.
 */
static int imr_debugfs_register(struct imr_device *idev)
{
	idev->file = debugfs_create_file("imr_state", S_IFREG | S_IRUGO, NULL,
					 idev, &imr_state_ops);
	return PTR_ERR_OR_ZERO(idev->file);
}

/**
 * imr_check_params - check passed address range IMR alignment and non-zero size
 *
 * @base:	base address of intended IMR.
 * @size:	size of intended IMR.
 * @return:	zero on valid range -EINVAL on unaligned base/size.
 */
static int imr_check_params(phys_addr_t base, size_t size)
{
	if ((base & IMR_MASK) || (size & IMR_MASK)) {
		pr_err("base %pa size 0x%08zx must align to 1 KiB\n",
		       &base, size);
		return -EINVAL;
	}
	if (size == 0)
		return -EINVAL;

	return 0;
}

/**
 * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends.
 *
 * IMR addr_hi has a built in offset of plus IMR_ALIGN (0x400) bytes from the
 * value in the register. We need to subtract IMR_ALIGN bytes from input sizes
 * as a result.
 *
 * @size:	input size bytes.
 * @return:	reduced size.
 */
static inline size_t imr_raw_size(size_t size)
{
	return size - IMR_ALIGN;
}

/**
 * imr_address_overlap - detects an address overlap.
 *
 * @addr:	address to check against an existing IMR.
 * @imr:	imr being checked.
 * @return:	true for overlap false for no overlap.
 */
static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
{
	return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi);
}

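/*
 * Worked example of the size accounting (illustrative values): to protect
 * 4 KiB at base 0x00010000 a caller passes size = 0x1000. imr_raw_size()
 * trims that to 0xc00, so end = 0x00010c00 is what addr_hi encodes. The
 * hardware's implicit +IMR_ALIGN then extends the protected range to
 * 0x00010fff inclusive - exactly the 4 KiB requested. Reading it back,
 * imr_dbgfs_state_show() recovers size = end - base + 1 = 0x1000 after
 * adding IMR_MASK to the decoded addr_hi.
 */
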
/**
 * imr_add_range - add an Isolated Memory Region.
 *
 * @base:	physical base address of region aligned to 1 KiB.
 * @size:	physical size of region in bytes must be aligned to 1 KiB.
 * @rmask:	read access mask.
 * @wmask:	write access mask.
 * @return:	zero on success or negative value indicating error.
 */
int imr_add_range(phys_addr_t base, size_t size,
		  unsigned int rmask, unsigned int wmask)
{
	phys_addr_t end;
	unsigned int i;
	struct imr_device *idev = &imr_dev;
	struct imr_regs imr;
	size_t raw_size;
	int reg;
	int ret;

	if (WARN_ONCE(idev->init == false, "driver not initialized"))
		return -ENODEV;

	ret = imr_check_params(base, size);
	if (ret)
		return ret;

	/* Tweak the size value. */
	raw_size = imr_raw_size(size);
	end = base + raw_size;

	/*
	 * Check for reserved IMR value common to firmware, kernel and grub
	 * indicating a disabled IMR.
	 */
	imr.addr_lo = phys_to_imr(base);
	imr.addr_hi = phys_to_imr(end);
	imr.rmask = rmask;
	imr.wmask = wmask;
	if (!imr_is_enabled(&imr))
		return -ENOTSUPP;

	mutex_lock(&idev->lock);

	/*
	 * Find a free IMR while checking for an existing overlapping range.
	 * Note there's no restriction in silicon to prevent IMR overlaps.
	 * For the sake of simplicity and ease in defining/debugging an IMR
	 * memory map we exclude IMR overlaps.
	 */
	reg = -1;
	for (i = 0; i < idev->max_imr; i++) {
		ret = imr_read(idev, i, &imr);
		if (ret)
			goto failed;

		/* Find overlap @ base or end of requested range. */
		ret = -EINVAL;
		if (imr_is_enabled(&imr)) {
			if (imr_address_overlap(base, &imr))
				goto failed;
			if (imr_address_overlap(end, &imr))
				goto failed;
		} else {
			reg = i;
		}
	}

	/* Error out if we have no free IMR entries. */
	if (reg == -1) {
		ret = -ENOMEM;
		goto failed;
	}

	pr_debug("add %d phys %pa-%pa size %zx rmask 0x%08x wmask 0x%08x\n",
		 reg, &base, &end, raw_size, rmask, wmask);

	/* Enable IMR at specified range and access mask. */
	imr.addr_lo = phys_to_imr(base);
	imr.addr_hi = phys_to_imr(end);
	imr.rmask = rmask;
	imr.wmask = wmask;

	ret = imr_write(idev, reg, &imr);
	if (ret < 0) {
		/*
		 * In the highly unlikely event iosf_mbi_write failed
		 * attempt to rollback the IMR setup skipping the trapping
		 * of further IOSF write failures.
		 */
		imr.addr_lo = 0;
		imr.addr_hi = 0;
		imr.rmask = IMR_READ_ACCESS_ALL;
		imr.wmask = IMR_WRITE_ACCESS_ALL;
		imr_write(idev, reg, &imr);
	}
failed:
	mutex_unlock(&idev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(imr_add_range);

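/*
 * Example usage - an illustrative sketch only; the address, size and masks
 * below are hypothetical, not a recommended configuration. To grant the
 * host CPU exclusive access to the 4 KiB at physical address 0x00010000:
 *
 *	ret = imr_add_range(0x00010000, 0x1000, IMR_CPU, IMR_CPU);
 *
 * and to tear the same region down again later:
 *
 *	ret = imr_remove_range(0x00010000, 0x1000);
 */
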
/**
 * __imr_remove_range - delete an Isolated Memory Region.
 *
 * This function allows you to delete an IMR by its index specified by reg or
 * by address range specified by base and size respectively. If you specify an
 * index on its own the base and size parameters are ignored.
 * __imr_remove_range(0, base, size); delete IMR at index 0 base/size ignored.
 * __imr_remove_range(-1, base, size); delete IMR from base to base+size.
 *
 * @reg:	imr index to remove.
 * @base:	physical base address of region aligned to 1 KiB.
 * @size:	physical size of region in bytes aligned to 1 KiB.
 * @return:	-EINVAL on invalid range or out of range id
 *		-ENODEV if reg is valid but no IMR exists or is locked
 *		0 on success.
 */
static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
{
	phys_addr_t end = 0;
	bool found = false;
	unsigned int i;
	struct imr_device *idev = &imr_dev;
	struct imr_regs imr;
	size_t raw_size = 0;
	int ret = 0;

	if (WARN_ONCE(idev->init == false, "driver not initialized"))
		return -ENODEV;

	/*
	 * Validate address range if deleting by address, else we are
	 * deleting by index where base and size will be ignored.
	 */
	if (reg == -1) {
		ret = imr_check_params(base, size);
		if (ret)
			return ret;

		/* Tweak the size value. */
		raw_size = imr_raw_size(size);
		end = base + raw_size;
	}

	mutex_lock(&idev->lock);

	if (reg >= 0) {
		/* If a specific IMR is given try to use it. */
		ret = imr_read(idev, reg, &imr);
		if (ret)
			goto failed;

		if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) {
			ret = -ENODEV;
			goto failed;
		}
		found = true;
	} else {
		/* Search for match based on address range. */
		for (i = 0; i < idev->max_imr; i++) {
			ret = imr_read(idev, i, &imr);
			if (ret)
				goto failed;

			if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK)
				continue;

			if ((imr_to_phys(imr.addr_lo) == base) &&
			    (imr_to_phys(imr.addr_hi) == end)) {
				found = true;
				reg = i;
				break;
			}
		}
	}

	if (!found) {
		ret = -ENODEV;
		goto failed;
	}

	pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size);

	/* Tear down the IMR. */
	imr.addr_lo = 0;
	imr.addr_hi = 0;
	imr.rmask = IMR_READ_ACCESS_ALL;
	imr.wmask = IMR_WRITE_ACCESS_ALL;

	ret = imr_write(idev, reg, &imr);

failed:
	mutex_unlock(&idev->lock);
	return ret;
}

/**
 * imr_remove_range - delete an Isolated Memory Region by address
 *
 * This function allows you to delete an IMR by an address range specified
 * by base and size respectively.
 * imr_remove_range(base, size); delete IMR from base to base+size.
 *
 * @base:	physical base address of region aligned to 1 KiB.
 * @size:	physical size of region in bytes aligned to 1 KiB.
 * @return:	-EINVAL on invalid range or out of range id
 *		-ENODEV if reg is valid but no IMR exists or is locked
 *		0 on success.
 */
int imr_remove_range(phys_addr_t base, size_t size)
{
	return __imr_remove_range(-1, base, size);
}
EXPORT_SYMBOL_GPL(imr_remove_range);

/**
 * imr_clear - delete an Isolated Memory Region by index
 *
 * This function allows you to delete an IMR by its index. Useful for
 * initial sanitization of the IMR address map.
 * imr_clear(reg); delete IMR at index reg.
 *
 * @reg:	imr index to remove.
 * @return:	-EINVAL on invalid range or out of range id
 *		-ENODEV if reg is valid but no IMR exists or is locked
 *		0 on success.
 */
static inline int imr_clear(int reg)
{
	return __imr_remove_range(reg, 0, 0);
}

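/*
 * Note for the boot-time sanitization below: blindly calling imr_clear()
 * on every index is safe because __imr_remove_range() refuses to touch a
 * locked IMR - it simply returns -ENODEV, which imr_fixup_memmap() ignores,
 * so firmware-locked regions survive the sweep untouched.
 */
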
/**
 * imr_fixup_memmap - Tear down IMRs used during bootup.
 *
 * BIOS and Grub both set up IMRs around compressed kernel and initrd memory
 * that need to be removed before the kernel hands out one of the IMR
 * encased addresses to a downstream DMA agent such as the SD or Ethernet.
 * IMRs on Galileo are set up to immediately reset the system on violation.
 * As a result, if you're running a root filesystem from SD you'll need
 * the boot-time IMRs torn down or you'll find seemingly random resets when
 * using your filesystem.
 *
 * @idev:	pointer to imr_device structure.
 */
static void __init imr_fixup_memmap(struct imr_device *idev)
{
	phys_addr_t base = virt_to_phys(&_text);
	size_t size = virt_to_phys(&__end_rodata) - base;
	unsigned long start, end;
	int i;
	int ret;

	/* Tear down all existing unlocked IMRs. */
	for (i = 0; i < idev->max_imr; i++)
		imr_clear(i);

	start = (unsigned long)_text;
	end = (unsigned long)__end_rodata - 1;

	/*
	 * Setup an unlocked IMR around the physical extent of the kernel
	 * from the beginning of the .text section to the end of the
	 * .rodata section as one physically contiguous block.
	 *
	 * We don't round up @size since it is already PAGE_SIZE aligned.
	 * See vmlinux.lds.S for details.
	 */
	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
	if (ret < 0) {
		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
		       size / 1024, start, end);
	} else {
		pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n",
			size / 1024, start, end);
	}
}

static const struct x86_cpu_id imr_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 5, 9 },	/* Intel Quark SoC X1000. */
	{}
};

/**
 * imr_init - entry point for IMR driver.
 *
 * @return:	-ENODEV for no IMR support, 0 if good to go.
 */
static int __init imr_init(void)
{
	struct imr_device *idev = &imr_dev;
	int ret;

	if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
		return -ENODEV;

	idev->max_imr = QUARK_X1000_IMR_MAX;
	idev->reg_base = QUARK_X1000_IMR_REGBASE;
	idev->init = true;

	mutex_init(&idev->lock);
	ret = imr_debugfs_register(idev);
	if (ret != 0)
		pr_warn("debugfs register failed!\n");
	imr_fixup_memmap(idev);
	return 0;
}
device_initcall(imr_init);

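/*
 * With the driver initialized, the IMR map can be inspected from userspace
 * via the debugfs file registered above (path assumes debugfs is mounted at
 * the conventional location):
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/imr_state
 */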