// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
#include "mem.h"
#include "cxl.h"

/*
 * Ordered workqueue for cxl nvdimm device arrival and departure, used to
 * coordinate bus rescans when a bridge arrives and to trigger remove
 * operations when the bridge is removed.
 */
static struct workqueue_struct *cxl_pmem_wq;

static void unregister_nvdimm(void *nvdimm)
{
	nvdimm_delete(nvdimm);
}

static int match_nvdimm_bridge(struct device *dev, const void *data)
{
	return strcmp(dev_name(dev), "nvdimm-bridge") == 0;
}

static struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(void)
{
	struct device *dev;

	dev = bus_find_device(&cxl_bus_type, NULL, NULL, match_nvdimm_bridge);
	if (!dev)
		return NULL;
	return to_cxl_nvdimm_bridge(dev);
}

/*
 * Register a cxl_nvdimm device as an nvdimm on its bridge's nvdimm_bus.
 * Returns -ENXIO if no bridge, or no bus, is available yet; a later bridge
 * arrival triggers a cxl bus rescan that retries this probe.
 */
static int cxl_nvdimm_probe(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
	struct cxl_nvdimm_bridge *cxl_nvb;
	unsigned long flags = 0;
	struct nvdimm *nvdimm;
	int rc = -ENXIO;

	cxl_nvb = cxl_find_nvdimm_bridge();
	if (!cxl_nvb)
		return -ENXIO;

	device_lock(&cxl_nvb->dev);
	if (!cxl_nvb->nvdimm_bus)
		goto out;

	set_bit(NDD_LABELING, &flags);
	nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags, 0, 0,
			       NULL);
	if (!nvdimm)
		goto out;

	rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
	device_unlock(&cxl_nvb->dev);
	put_device(&cxl_nvb->dev);

	return rc;
}

static struct cxl_driver cxl_nvdimm_driver = {
	.name = "cxl_nvdimm",
	.probe = cxl_nvdimm_probe,
	.id = CXL_DEVICE_NVDIMM,
};

static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			unsigned int buf_len, int *cmd_rc)
{
	return -ENOTTY;
}

static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
{
	if (cxl_nvb->nvdimm_bus)
		return true;
	cxl_nvb->nvdimm_bus =
		nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc);
	return cxl_nvb->nvdimm_bus != NULL;
}

static int cxl_nvdimm_release_driver(struct device *dev, void *data)
{
	if (!is_cxl_nvdimm(dev))
		return 0;
	device_release_driver(dev);
	return 0;
}

static void offline_nvdimm_bus(struct nvdimm_bus *nvdimm_bus)
{
	if (!nvdimm_bus)
		return;

	/*
	 * Set the state of cxl_nvdimm devices to unbound / idle before
	 * nvdimm_bus_unregister() rips the nvdimm objects out from
	 * underneath them.
	 */
	bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_release_driver);
	nvdimm_bus_unregister(nvdimm_bus);
}

/*
 * Reconcile the bridge's requested state with its nvdimm_bus: bring the bus
 * up and rescan for cxl_nvdimm devices when going online, or unbind the
 * cxl_nvdimm devices and tear the bus down when going offline / dead.
 */
static void cxl_nvb_update_state(struct work_struct *work)
{
	struct cxl_nvdimm_bridge *cxl_nvb =
		container_of(work, typeof(*cxl_nvb), state_work);
	struct nvdimm_bus *victim_bus = NULL;
	bool release = false, rescan = false;

	device_lock(&cxl_nvb->dev);
	switch (cxl_nvb->state) {
	case CXL_NVB_ONLINE:
		if (!online_nvdimm_bus(cxl_nvb)) {
			dev_err(&cxl_nvb->dev,
				"failed to establish nvdimm bus\n");
			release = true;
		} else {
			rescan = true;
		}
		break;
	case CXL_NVB_OFFLINE:
	case CXL_NVB_DEAD:
		victim_bus = cxl_nvb->nvdimm_bus;
		cxl_nvb->nvdimm_bus = NULL;
		break;
	default:
		break;
	}
	device_unlock(&cxl_nvb->dev);

	if (release)
		device_release_driver(&cxl_nvb->dev);
	if (rescan) {
		int rc = bus_rescan_devices(&cxl_bus_type);

		dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
	}
	offline_nvdimm_bus(victim_bus);

	/* Drop the reference taken when this work was queued */
	put_device(&cxl_nvb->dev);
}

static void cxl_nvdimm_bridge_remove(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_ONLINE)
		cxl_nvb->state = CXL_NVB_OFFLINE;
	if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
		get_device(&cxl_nvb->dev);
}

static int cxl_nvdimm_bridge_probe(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_DEAD)
		return -ENXIO;

	if (cxl_nvb->state == CXL_NVB_NEW) {
		cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
			.provider_name = "CXL",
			.module = THIS_MODULE,
			.ndctl = cxl_pmem_ctl,
		};

		INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state);
	}

	cxl_nvb->state = CXL_NVB_ONLINE;
	if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
		get_device(&cxl_nvb->dev);

	return 0;
}

static struct cxl_driver cxl_nvdimm_bridge_driver = {
	.name = "cxl_nvdimm_bridge",
	.probe = cxl_nvdimm_bridge_probe,
	.remove = cxl_nvdimm_bridge_remove,
	.id = CXL_DEVICE_NVDIMM_BRIDGE,
};

static __init int cxl_pmem_init(void)
{
	int rc;

	cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
	if (!cxl_pmem_wq)
		return -ENXIO;

	rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
	if (rc)
		goto err_bridge;

	rc = cxl_driver_register(&cxl_nvdimm_driver);
	if (rc)
		goto err_nvdimm;

	return 0;

err_nvdimm:
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
	destroy_workqueue(cxl_pmem_wq);
	return rc;
}

static __exit void cxl_pmem_exit(void)
{
	cxl_driver_unregister(&cxl_nvdimm_driver);
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
	destroy_workqueue(cxl_pmem_wq);
}

MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);