/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>

typedef unsigned long dax_entry_t;

struct dax_device;
struct gendisk;
struct iomap_ops;
struct iomap_iter;
struct iomap;

enum dax_access_mode {
	DAX_ACCESS,
	DAX_RECOVERY_WRITE,
};

struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			enum dax_access_mode, void **, pfn_t *);
	/* zero_page_range: required operation. Zero a range of pages. */
	int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
	/*
	 * recovery_write: write to a poisoned range, implemented by DAX
	 * device drivers capable of clearing poison.
	 */
	size_t (*recovery_write)(struct dax_device *dax_dev, pgoff_t pgoff,
			void *addr, size_t bytes, struct iov_iter *iter);
};

struct dax_holder_operations {
	/*
	 * notify_failure - notify the holder about a memory failure on
	 * the dax device
	 * @dax_dev: the dax device that contains the holder
	 * @offset: offset on this dax device where the memory failure occurs
	 * @len: length of this memory failure event
	 * @mf_flags: action flags for the memory failure handler
	 */
	int (*notify_failure)(struct dax_device *dax_dev, u64 offset,
			u64 len, int mf_flags);
};
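
/*
 * Illustrative sketch only (not part of this header): a filesystem that
 * wants media-failure notifications can register itself as the holder of
 * its dax device at mount time via fs_dax_get_by_bdev() (declared below)
 * and drop the reference with fs_put_dax() at unmount.  The myfs_* names
 * and the myfs_handle_media_failure() helper are hypothetical.
 *
 *	static int myfs_notify_failure(struct dax_device *dax_dev,
 *				       u64 offset, u64 len, int mf_flags)
 *	{
 *		struct myfs_sb_info *sbi = dax_holder(dax_dev);
 *
 *		// Translate the failed device range into file ranges and
 *		// unmap or kill the affected mappings.
 *		return myfs_handle_media_failure(sbi, offset, len, mf_flags);
 *	}
 *
 *	static const struct dax_holder_operations myfs_dax_holder_ops = {
 *		.notify_failure = myfs_notify_failure,
 *	};
 *
 *	// At mount time:
 *	sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->dax_part_off,
 *					  sbi, &myfs_dax_holder_ops);
 */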

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *alloc_dax(void *private, const struct dax_operations *ops);
void *dax_holder(struct dax_device *dax_dev);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool dax_synchronous(struct dax_device *dax_dev);
void set_dax_nocache(struct dax_device *dax_dev);
void set_dax_nomc(struct dax_device *dax_dev);
void set_dax_synchronous(struct dax_device *dax_dev);
size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
/*
 * Check if the given mapping is supported by the file / underlying device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	if (!(vma->vm_flags & VM_SYNC))
		return true;
	if (!IS_DAX(file_inode(vma->vm_file)))
		return false;
	return dax_synchronous(dax_dev);
}
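
/*
 * Illustrative sketch only: a filesystem ->mmap() handler can use
 * daxdev_mapping_supported() to reject MAP_SYNC (VM_SYNC) mappings when
 * the underlying dax device is not synchronous.  The myfs_* names are
 * hypothetical.
 *
 *	static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct myfs_sb_info *sbi = file_inode(file)->i_sb->s_fs_info;
 *
 *		if (!daxdev_mapping_supported(vma, sbi->dax_dev))
 *			return -EOPNOTSUPP;
 *		...
 *	}
 */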
#else
static inline void *dax_holder(struct dax_device *dax_dev)
{
	return NULL;
}
static inline struct dax_device *alloc_dax(void *private,
		const struct dax_operations *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
static inline void set_dax_nocache(struct dax_device *dax_dev)
{
}
static inline void set_dax_nomc(struct dax_device *dax_dev)
{
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
}
static inline size_t dax_recovery_write(struct dax_device *dax_dev,
		pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
{
	return 0;
}
#endif

struct writeback_control;
#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
void dax_remove_host(struct gendisk *disk);
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
		void *holder, const struct dax_holder_operations *ops);
void fs_put_dax(struct dax_device *dax_dev, void *holder);
#else
static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
	return 0;
}
static inline void dax_remove_host(struct gendisk *disk)
{
}
static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
		u64 *start_off, void *holder,
		const struct dax_holder_operations *ops)
{
	return NULL;
}
static inline void fs_put_dax(struct dax_device *dax_dev, void *holder)
{
}
#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */

#if IS_ENABLED(CONFIG_FS_DAX)
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping,
		loff_t start, loff_t end);
dax_entry_t dax_lock_folio(struct folio *folio);
void dax_unlock_folio(struct folio *folio, dax_entry_t cookie);
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
		unsigned long index, struct page **page);
void dax_unlock_mapping_entry(struct address_space *mapping,
		unsigned long index, dax_entry_t cookie);
#else
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page_range(struct address_space *mapping,
		loff_t start, loff_t end)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}

static inline dax_entry_t dax_lock_folio(struct folio *folio)
{
	if (IS_DAX(folio->mapping->host))
		return ~0UL;
	return 0;
}

static inline void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
{
}

static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
		unsigned long index, struct page **page)
{
	return 0;
}

static inline void dax_unlock_mapping_entry(struct address_space *mapping,
		unsigned long index, dax_entry_t cookie)
{
}
#endif

int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops);
int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops);
int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops);

#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
#else
static inline int dax_read_lock(void)
{
	return 0;
}

static inline void dax_read_unlock(int id)
{
}
#endif /* CONFIG_DAX */
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages);
int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off, u64 len,
		int mf_flags);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		unsigned int order, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);
int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
		struct inode *dest, loff_t destoff,
		loff_t len, bool *is_same,
		const struct iomap_ops *ops);
int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
		struct file *file_out, loff_t pos_out,
		loff_t *len, unsigned int remap_flags,
		const struct iomap_ops *ops);
static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

/*
 * Due to dax's dual memory and block personalities, hwpoison reporting
 * takes into account which personality is presently in use.
 * When dax acts as a block device, such as during block IO, encountering
 * hwpoison is reported as -EIO.
 * When dax acts as memory, such as during a page fault, detecting
 * hwpoison is reported as -EHWPOISON, which leads to VM_FAULT_HWPOISON.
 */
static inline int dax_mem2blk_err(int err)
{
	return (err == -EHWPOISON) ? -EIO : err;
}
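
/*
 * Illustrative sketch only: callers typically bracket dax_direct_access()
 * with dax_read_lock()/dax_read_unlock() so the device cannot be torn
 * down while the returned kaddr/pfn are in use.  Error handling is
 * elided; dax_dev, pgoff and nr_pages are assumed to be set up already.
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long avail;
 *	int id;
 *
 *	id = dax_read_lock();
 *	avail = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
 *				  &kaddr, &pfn);
 *	if (avail > 0) {
 *		// avail pages are now addressable at kaddr; access the
 *		// device memory directly here.
 *	}
 *	dax_read_unlock(id);
 */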

#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
void hmem_register_resource(int target_nid, struct resource *r);
#else
static inline void hmem_register_resource(int target_nid, struct resource *r)
{
}
#endif

typedef int (*walk_hmem_fn)(struct device *dev, int target_nid,
		const struct resource *res);
int walk_hmem_resources(struct device *dev, walk_hmem_fn fn);
#endif