/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *	Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
	struct drm_minor *minor = dev_get_drvdata(kdev);
	return to_i915(minor->dev);
}

#ifdef CONFIG_PM
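/*
 * Convert a raw residency counter value into milliseconds. On most
 * platforms the counter ticks in 1.28us units, so the conversion is
 * raw * 1.28us / 1000us-per-ms, done in fixed point as raw * 128 / 100000
 * (e.g. a raw count of 781250 works out to exactly 1000ms). VLV/CHV
 * counters tick at the CZ clock rate instead, and gen9 LP parts use
 * 833.33ns units, hence the per-platform overrides below.
 */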
static u32 calc_residency(struct drm_i915_private *dev_priv,
			  i915_reg_t reg)
{
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL;
	u32 ret;

	if (!intel_enable_rc6())
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		units = 1;
		div = dev_priv->czclk_freq;

		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;
	} else if (IS_GEN9_LP(dev_priv)) {
		units = 1;
		div = 1200; /* 833.33ns */
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

	intel_runtime_pm_put(dev_priv);
	return ret;
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev,
		  struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs = media_rc6_attrs
};
#endif

static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
{
	if (!HAS_L3_DPF(dev_priv))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct drm_device *dev = &dev_priv->drm;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(dev_priv, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&dev->struct_mutex);

	return count;
}
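
/*
 * Writes to l3_parity only stage the new remap information in memory; the
 * hardware is not touched here. Each context is instead flagged via its
 * remap_slice mask so the new mapping gets programmed the next time that
 * context is switched in.
 */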
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gem_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(dev_priv))
		return -ENXIO;

	ret = l3_access_valid(dev_priv, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&dev->struct_mutex);
			return -ENOMEM;
		}
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the
	 * GPU at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};
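
/*
 * gt_act_freq_mhz reads back the frequency the GPU is actually running at
 * (from the punit on VLV/CHV, from RPSTAT1 elsewhere), whereas
 * gt_cur_freq_mhz below reports the driver's last requested frequency
 * (rps.cur_freq); the two need not match, since the request is only a
 * target.
 */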
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	int ret;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		u32 rpstat = I915_READ(GEN6_RPSTAT1);
		if (IS_GEN9(dev_priv))
			ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		ret = intel_gpu_freq(dev_priv, ret);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.cur_freq));
}

static ssize_t gt_boost_freq_mhz_show(struct device *kdev,
				      struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.boost_freq));
}

static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(dev_priv, val);
	if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
		return -EINVAL;

	mutex_lock(&dev_priv->rps.hw_lock);
	dev_priv->rps.boost_freq = val;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.efficient_freq));
}
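
/*
 * gt_max_freq_mhz and gt_min_freq_mhz expose the user-adjustable soft
 * limits. Stores convert the new value to hardware units via
 * intel_freq_opcode(), validate it against both the absolute hardware
 * range and the opposing soft limit, then re-clamp the current frequency
 * so the change takes effect immediately.
 */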
static ssize_t gt_max_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.max_freq_softlimit));
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	dev_priv->rps.max_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * the frequency request may be unchanged. */
	intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.min_freq_softlimit));
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * the frequency request may be unchanged. */
	intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return count;
}

static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev,
			      struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev,
			      struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;

	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
	else
		BUG();

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
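
/*
 * The "error" node dumps the most recently captured GPU error state as
 * text; reads honour the requested offset and length so large dumps can
 * be fetched piecewise. Writing anything to the node discards the
 * captured state.
 */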
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
	if (ret)
		return ret;

	error_priv.i915 = dev_priv;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_destroy_error_state(dev_priv);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

static void i915_setup_error_capture(struct device *kdev)
{
	if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
		DRM_ERROR("error_state sysfs setup failed\n");
}

static void i915_teardown_error_capture(struct device *kdev)
{
	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}
#else
static void i915_setup_error_capture(struct device *kdev) {}
static void i915_teardown_error_capture(struct device *kdev) {}
#endif

void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev_priv)) {
		ret = device_create_bin_file(kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev_priv) > 1) {
			ret = device_create_bin_file(kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
	else if (INTEL_GEN(dev_priv) >= 6)
		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	i915_setup_error_capture(kdev);
}

void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;

	i915_teardown_error_capture(kdev);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sysfs_remove_files(&kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&kdev->kobj, gen6_attrs);
	device_remove_bin_file(kdev, &dpf_attrs_1);
	device_remove_bin_file(kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
#endif
}