/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/fs.h>
#include <linux/mount.h>

#include "i915_drv.h"
#include "i915_gemfs.h"
#include "i915_utils.h"

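/**
 * i915_gemfs_init - create a private shmemfs mount for GEM object backing
 * @i915: i915 device instance
 *
 * Try to mount a private tmpfs instance with transparent huge pages enabled
 * ("huge=within_size") on platforms where that is expected to be a win. On
 * failure, or when huge pages are not expected to help, i915->mm.gemfs is
 * left unset and the kernel's default shmemfs mount is used instead.
 */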
void i915_gemfs_init(struct drm_i915_private *i915)
{
	char huge_opt[] = "huge=within_size"; /* must be writable, not a string literal */
	struct file_system_type *type;
	struct vfsmount *gemfs;

	/*
	 * By creating our own shmemfs mountpoint, we can pass in
	 * mount flags that better match our usecase.
	 *
	 * One example, although it is probably better with a per-file
	 * control, is selecting huge page allocations ("huge=within_size").
	 * However, we only do so on platforms which benefit from it, or to
	 * offset the overhead of IOMMU lookups, where the latter is a net
	 * win even on platforms which would otherwise see some performance
	 * regressions, such as the slow-reads issue on Broadwell and Skylake.
	 */

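	/*
	 * Pre-Gen11 hardware with VT-d inactive is not expected to benefit,
	 * so keep using the kernel's default shmemfs mount.
	 */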
	if (GRAPHICS_VER(i915) < 11 && !i915_vtd_active(i915))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		goto err;

	type = get_fs_type("tmpfs");
	if (!type)
		goto err;

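	/* Kernel-internal tmpfs mount, with THP enabled via huge=within_size. */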
	gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt);
	if (IS_ERR(gemfs))
		goto err;

	i915->mm.gemfs = gemfs;
	drm_info(&i915->drm, "Using Transparent Hugepages\n");
	return;

err:
	drm_notice(&i915->drm,
		   "Transparent Hugepage support is recommended for optimal performance%s\n",
		   GRAPHICS_VER(i915) >= 11 ? " on this platform!" :
					      " when IOMMU is enabled!");
}

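/**
 * i915_gemfs_fini - release the private shmemfs mount, if one was created
 * @i915: i915 device instance
 */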
void i915_gemfs_fini(struct drm_i915_private *i915)
{
	kern_unmount(i915->mm.gemfs);
}