// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prandom.h>

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"

static const struct {
        u16 class;
        u16 instance;
} intel_region_map[] = {
        [INTEL_REGION_SMEM] = {
                .class = INTEL_MEMORY_SYSTEM,
                .instance = 0,
        },
        [INTEL_REGION_LMEM_0] = {
                .class = INTEL_MEMORY_LOCAL,
                .instance = 0,
        },
        [INTEL_REGION_STOLEN_SMEM] = {
                .class = INTEL_MEMORY_STOLEN_SYSTEM,
                .instance = 0,
        },
        [INTEL_REGION_STOLEN_LMEM] = {
                .class = INTEL_MEMORY_STOLEN_LOCAL,
                .instance = 0,
        },
};

static int __iopagetest(struct intel_memory_region *mem,
                        u8 __iomem *va, int pagesize,
                        u8 value, resource_size_t offset,
                        const void *caller)
{
        int byte = get_random_u32_below(pagesize);
        u8 result[3];

        memset_io(va, value, pagesize); /* or GPF! */
        wmb();

        result[0] = ioread8(va);
        result[1] = ioread8(va + byte);
        result[2] = ioread8(va + pagesize - 1);
        if (memchr_inv(result, value, sizeof(result))) {
                dev_err(mem->i915->drm.dev,
                        "Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
                        &mem->region, &mem->io.start, &offset, caller,
                        value, result[0], result[1], result[2]);
                return -EINVAL;
        }

        return 0;
}

static int iopagetest(struct intel_memory_region *mem,
                      resource_size_t offset,
                      const void *caller)
{
        const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 };
        void __iomem *va;
        int err;
        int i;

        va = ioremap_wc(mem->io.start + offset, PAGE_SIZE);
        if (!va) {
                dev_err(mem->i915->drm.dev,
                        "Failed to ioremap memory region [%pa + %pa] for %ps\n",
                        &mem->io.start, &offset, caller);
                return -EFAULT;
        }

        for (i = 0; i < ARRAY_SIZE(val); i++) {
                err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
                if (err)
                        break;

                err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
                if (err)
                        break;
        }

        iounmap(va);
        return err;
}

static resource_size_t random_page(resource_size_t last)
{
        /*
         * Limited to low 44b (16TiB), but should suffice for a spot check.
         * Widen before shifting so the page index is not truncated to 32b.
         */
        return (resource_size_t)get_random_u32_below(last >> PAGE_SHIFT) << PAGE_SHIFT;
}
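/*
 * Sanity check CPU read/write access through a region's io window.  With
 * @test_all set (the i915.memtest modparam), every page of the window is
 * exercised; otherwise only the first page, the last page and one random
 * page are probed as a quick spot check, e.g. (illustrative, mirroring
 * intel_memory_region_memtest() below):
 *
 *	err = iomemtest(mem, false, (void *)_RET_IP_);
 */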
static int iomemtest(struct intel_memory_region *mem,
                     bool test_all,
                     const void *caller)
{
        resource_size_t last, page;
        int err;

        if (resource_size(&mem->io) < PAGE_SIZE)
                return 0;

        last = resource_size(&mem->io) - PAGE_SIZE;

        /*
         * Quick test to check read/write access to the iomap (backing store).
         *
         * Write a known value, then read it back. If the iomapping fails, we
         * expect a GPF preventing further execution. If the backing store
         * does not exist, the read back will return garbage. We check the
         * first and last pages of the specified region to confirm that the
         * backing store + iomap cover the entire memory region; and we check
         * a random offset within as a quick spot check for bad memory.
         */

        if (test_all) {
                for (page = 0; page <= last; page += PAGE_SIZE) {
                        err = iopagetest(mem, page, caller);
                        if (err)
                                return err;
                }
        } else {
                err = iopagetest(mem, 0, caller);
                if (err)
                        return err;

                err = iopagetest(mem, last, caller);
                if (err)
                        return err;

                err = iopagetest(mem, random_page(last), caller);
                if (err)
                        return err;
        }

        return 0;
}

struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
                           u16 class, u16 instance)
{
        struct intel_memory_region *mr;
        int id;

        /* XXX: consider maybe converting to an rb tree at some point */
        for_each_memory_region(mr, i915, id) {
                if (mr->type == class && mr->instance == instance)
                        return mr;
        }

        return NULL;
}

struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
                            enum intel_memory_type mem_type)
{
        struct intel_memory_region *mr;
        int id;

        for_each_memory_region(mr, i915, id)
                if (mr->type == mem_type)
                        return mr;

        return NULL;
}

bool intel_memory_type_is_local(enum intel_memory_type mem_type)
{
        switch (mem_type) {
        case INTEL_MEMORY_LOCAL:
        case INTEL_MEMORY_STOLEN_LOCAL:
                return true;
        default:
                return false;
        }
}

/**
 * intel_memory_region_reserve - Reserve a memory range
 * @mem: The region for which we want to reserve a range.
 * @offset: Start of the range to reserve.
 * @size: The size of the range to reserve.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_region_reserve(struct intel_memory_region *mem,
                                resource_size_t offset,
                                resource_size_t size)
{
        struct ttm_resource_manager *man = mem->region_private;

        GEM_BUG_ON(mem->is_range_manager);

        return i915_ttm_buddy_man_reserve(man, offset, size);
}

void intel_memory_region_debug(struct intel_memory_region *mr,
                               struct drm_printer *printer)
{
        drm_printf(printer, "%s: ", mr->name);

        if (mr->region_private)
                ttm_resource_manager_debug(mr->region_private, printer);
        else
                drm_printf(printer, "total:%pa bytes\n", &mr->total);
}

static int intel_memory_region_memtest(struct intel_memory_region *mem,
                                       void *caller)
{
        struct drm_i915_private *i915 = mem->i915;
        int err = 0;

        if (!mem->io.start)
                return 0;

        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
                err = iomemtest(mem, i915->params.memtest, caller);

        return err;
}

const char *intel_memory_type_str(enum intel_memory_type type)
{
        switch (type) {
        case INTEL_MEMORY_SYSTEM:
                return "system";
        case INTEL_MEMORY_LOCAL:
                return "local";
        case INTEL_MEMORY_STOLEN_LOCAL:
                return "stolen-local";
        case INTEL_MEMORY_STOLEN_SYSTEM:
                return "stolen-system";
        default:
                return "unknown";
        }
}
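/**
 * intel_memory_region_create - Create a memory region
 * @i915: The i915 device.
 * @start: Start of the address range managed by the region.
 * @size: Size of the region.
 * @min_page_size: Minimum allocation granularity for the region.
 * @io_start: Physical start of the CPU-accessible (io) window, if any.
 * @io_size: Size of the io window.
 * @type: The region type (enum intel_memory_type).
 * @instance: Instance number of this region type.
 * @ops: The region ops; the ->init() and ->release() hooks are optional.
 *
 * An illustrative call site (sketch only; names and values are made up):
 *
 *	mem = intel_memory_region_create(i915, 0, lmem_size, PAGE_SIZE,
 *					 io_start, io_size,
 *					 INTEL_MEMORY_LOCAL, 0, &lmem_ops);
 *	if (IS_ERR(mem))
 *		return PTR_ERR(mem);
 *
 * Return: The new region on success, an ERR_PTR() on failure.
 */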
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
                           resource_size_t start,
                           resource_size_t size,
                           resource_size_t min_page_size,
                           resource_size_t io_start,
                           resource_size_t io_size,
                           u16 type,
                           u16 instance,
                           const struct intel_memory_region_ops *ops)
{
        struct intel_memory_region *mem;
        int err;

        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        mem->i915 = i915;
        mem->region = DEFINE_RES_MEM(start, size);
        mem->io = DEFINE_RES_MEM(io_start, io_size);
        mem->min_page_size = min_page_size;
        mem->ops = ops;
        mem->total = size;
        mem->type = type;
        mem->instance = instance;

        snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
                 intel_memory_type_str(type), instance);

        mutex_init(&mem->objects.lock);
        INIT_LIST_HEAD(&mem->objects.list);

        if (ops->init) {
                err = ops->init(mem);
                if (err)
                        goto err_free;
        }

        err = intel_memory_region_memtest(mem, (void *)_RET_IP_);
        if (err)
                goto err_release;

        return mem;

err_release:
        if (mem->ops->release)
                mem->ops->release(mem);
err_free:
        kfree(mem);
        return ERR_PTR(err);
}

void intel_memory_region_set_name(struct intel_memory_region *mem,
                                  const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
        va_end(ap);
}

void intel_memory_region_avail(struct intel_memory_region *mr,
                               u64 *avail, u64 *visible_avail)
{
        if (mr->type == INTEL_MEMORY_LOCAL) {
                i915_ttm_buddy_man_avail(mr->region_private,
                                         avail, visible_avail);
                *avail <<= PAGE_SHIFT;
                *visible_avail <<= PAGE_SHIFT;
        } else {
                *avail = mr->total;
                *visible_avail = mr->total;
        }
}
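/*
 * An illustrative use of intel_memory_region_avail() (sketch only, not a
 * verbatim i915 call site); both counters are returned in bytes:
 *
 *	u64 avail, visible_avail;
 *
 *	intel_memory_region_avail(mr, &avail, &visible_avail);
 *	drm_printf(p, "%s: %llu bytes free, %llu bytes CPU-visible\n",
 *		   mr->uabi_name, avail, visible_avail);
 */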
void intel_memory_region_destroy(struct intel_memory_region *mem)
{
        int ret = 0;

        if (mem->ops->release)
                ret = mem->ops->release(mem);

        GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
        mutex_destroy(&mem->objects.lock);
        if (!ret)
                kfree(mem);
}

/* Global memory region registration -- only slight layer inversions! */

int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
        int err, i;

        for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
                struct intel_memory_region *mem = ERR_PTR(-ENODEV);
                u16 type, instance;

                if (!HAS_REGION(i915, i))
                        continue;

                type = intel_region_map[i].class;
                instance = intel_region_map[i].instance;
                switch (type) {
                case INTEL_MEMORY_SYSTEM:
                        if (IS_DGFX(i915))
                                mem = i915_gem_ttm_system_setup(i915, type,
                                                                instance);
                        else
                                mem = i915_gem_shmem_setup(i915, type,
                                                           instance);
                        break;
                case INTEL_MEMORY_STOLEN_LOCAL:
                        mem = i915_gem_stolen_lmem_setup(i915, type, instance);
                        if (!IS_ERR(mem))
                                i915->mm.stolen_region = mem;
                        break;
                case INTEL_MEMORY_STOLEN_SYSTEM:
                        mem = i915_gem_stolen_smem_setup(i915, type, instance);
                        if (!IS_ERR(mem))
                                i915->mm.stolen_region = mem;
                        break;
                default:
                        continue;
                }

                if (IS_ERR(mem)) {
                        err = PTR_ERR(mem);
                        drm_err(&i915->drm,
                                "Failed to setup region(%d) type=%d\n",
                                err, type);
                        goto out_cleanup;
                }

                if (mem) { /* Skip on non-fatal errors */
                        mem->id = i;
                        i915->mm.regions[i] = mem;
                }
        }

        for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
                struct intel_memory_region *mem = i915->mm.regions[i];
                u64 region_size, io_size;

                if (!mem)
                        continue;

                region_size = resource_size(&mem->region) >> 20;
                io_size = resource_size(&mem->io) >> 20;

                if (resource_size(&mem->io))
                        drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: %llu MiB %pR\n",
                                mem->id, mem->name, region_size, &mem->region, io_size, &mem->io);
                else
                        drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: n/a\n",
                                mem->id, mem->name, region_size, &mem->region);
        }

        return 0;

out_cleanup:
        intel_memory_regions_driver_release(i915);
        return err;
}

void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
                struct intel_memory_region *region =
                        fetch_and_zero(&i915->mm.regions[i]);

                if (region)
                        intel_memory_region_destroy(region);
        }
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif