/*
 *	scsi_pm.c	Copyright (C) 2010 Alan Stern
 *
 *	SCSI dynamic Power Management
 *	Initial version: Alan Stern <stern@rowland.harvard.edu>
 */

#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/async.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"

#ifdef CONFIG_PM_SLEEP

static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->suspend ? pm->suspend(dev) : 0;
}

static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->freeze ? pm->freeze(dev) : 0;
}

static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}

static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->resume ? pm->resume(dev) : 0;
}

static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->thaw ? pm->thaw(dev) : 0;
}

static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->restore ? pm->restore(dev) : 0;
}

static int scsi_dev_type_suspend(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err;

        /* flush pending in-flight resume operations, suspend is synchronous */
        async_synchronize_full_domain(&scsi_sd_pm_domain);

        err = scsi_device_quiesce(to_scsi_device(dev));
        if (err == 0) {
                err = cb(dev, pm);
                if (err)
                        scsi_device_resume(to_scsi_device(dev));
        }
        dev_dbg(dev, "scsi suspend: %d\n", err);
        return err;
}

static int scsi_dev_type_resume(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err = 0;

        err = cb(dev, pm);
        scsi_device_resume(to_scsi_device(dev));
        dev_dbg(dev, "scsi resume: %d\n", err);

        if (err == 0) {
                pm_runtime_disable(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
        }

        return err;
}
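/*
 * Bus-level system-sleep callbacks.  Only scsi_device (sdev) devices are
 * quiesced and resumed here; targets and hosts are left alone apart from
 * having their runtime-PM status refreshed on resume.  Resume work for
 * sdevs is scheduled on the scsi_sd_pm_domain async domain (and suspend
 * synchronizes with that domain first), so slow operations such as disk
 * spin-up can overlap with the rest of system resume.
 */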
static int
scsi_bus_suspend_common(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        int err = 0;

        if (scsi_is_sdev_device(dev)) {
                /*
                 * All the high-level SCSI drivers that implement runtime
                 * PM treat runtime suspend, system suspend, and system
                 * hibernate nearly identically. In all cases the requirements
                 * for runtime suspension are stricter.
                 */
                if (pm_runtime_suspended(dev))
                        return 0;

                err = scsi_dev_type_suspend(dev, cb);
        }

        return err;
}

static void async_sdev_resume(void *dev, async_cookie_t cookie)
{
        scsi_dev_type_resume(dev, do_scsi_resume);
}

static void async_sdev_thaw(void *dev, async_cookie_t cookie)
{
        scsi_dev_type_resume(dev, do_scsi_thaw);
}

static void async_sdev_restore(void *dev, async_cookie_t cookie)
{
        scsi_dev_type_resume(dev, do_scsi_restore);
}

static int scsi_bus_resume_common(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        async_func_t fn;

        if (!scsi_is_sdev_device(dev))
                fn = NULL;
        else if (cb == do_scsi_resume)
                fn = async_sdev_resume;
        else if (cb == do_scsi_thaw)
                fn = async_sdev_thaw;
        else if (cb == do_scsi_restore)
                fn = async_sdev_restore;
        else
                fn = NULL;

        if (fn) {
                async_schedule_domain(fn, dev, &scsi_sd_pm_domain);

                /*
                 * If a user has disabled async probing a likely reason
                 * is due to a storage enclosure that does not inject
                 * staggered spin-ups.  For safety, make resume
                 * synchronous as well in that case.
                 */
                if (strncmp(scsi_scan_type, "async", 5) != 0)
                        async_synchronize_full_domain(&scsi_sd_pm_domain);
        } else {
                pm_runtime_disable(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
        }
        return 0;
}

static int scsi_bus_prepare(struct device *dev)
{
        if (scsi_is_sdev_device(dev)) {
                /* sd probing uses async_schedule.  Wait until it finishes. */
                async_synchronize_full_domain(&scsi_sd_probe_domain);

        } else if (scsi_is_host_device(dev)) {
                /* Wait until async scanning is finished */
                scsi_complete_async_scans();
        }
        return 0;
}

static int scsi_bus_suspend(struct device *dev)
{
        return scsi_bus_suspend_common(dev, do_scsi_suspend);
}

static int scsi_bus_resume(struct device *dev)
{
        return scsi_bus_resume_common(dev, do_scsi_resume);
}

static int scsi_bus_freeze(struct device *dev)
{
        return scsi_bus_suspend_common(dev, do_scsi_freeze);
}

static int scsi_bus_thaw(struct device *dev)
{
        return scsi_bus_resume_common(dev, do_scsi_thaw);
}

static int scsi_bus_poweroff(struct device *dev)
{
        return scsi_bus_suspend_common(dev, do_scsi_poweroff);
}

static int scsi_bus_restore(struct device *dev)
{
        return scsi_bus_resume_common(dev, do_scsi_restore);
}

#else /* CONFIG_PM_SLEEP */

#define scsi_bus_prepare        NULL
#define scsi_bus_suspend        NULL
#define scsi_bus_resume         NULL
#define scsi_bus_freeze         NULL
#define scsi_bus_thaw           NULL
#define scsi_bus_poweroff       NULL
#define scsi_bus_restore        NULL

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_RUNTIME
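
/*
 * Runtime-PM callbacks.  For sdevs, the driver's runtime_suspend/resume
 * callbacks are bracketed by blk_pre/post_runtime_suspend() and
 * blk_pre/post_runtime_resume() so that the block layer can veto the
 * suspend while requests are still pending and restart the request
 * queue once the device is active again.
 */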

static int sdev_runtime_suspend(struct device *dev)
{
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        struct scsi_device *sdev = to_scsi_device(dev);
        int err;

        err = blk_pre_runtime_suspend(sdev->request_queue);
        if (err)
                return err;
        if (pm && pm->runtime_suspend)
                err = pm->runtime_suspend(dev);
        blk_post_runtime_suspend(sdev->request_queue, err);

        return err;
}

static int scsi_runtime_suspend(struct device *dev)
{
        int err = 0;

        dev_dbg(dev, "scsi_runtime_suspend\n");
        if (scsi_is_sdev_device(dev))
                err = sdev_runtime_suspend(dev);

        /* Insert hooks here for targets, hosts, and transport classes */

        return err;
}

static int sdev_runtime_resume(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err = 0;

        blk_pre_runtime_resume(sdev->request_queue);
        if (pm && pm->runtime_resume)
                err = pm->runtime_resume(dev);
        blk_post_runtime_resume(sdev->request_queue, err);

        return err;
}

static int scsi_runtime_resume(struct device *dev)
{
        int err = 0;

        dev_dbg(dev, "scsi_runtime_resume\n");
        if (scsi_is_sdev_device(dev))
                err = sdev_runtime_resume(dev);

        /* Insert hooks here for targets, hosts, and transport classes */

        return err;
}

static int scsi_runtime_idle(struct device *dev)
{
        dev_dbg(dev, "scsi_runtime_idle\n");

        /* Insert hooks here for targets, hosts, and transport classes */

        if (scsi_is_sdev_device(dev)) {
                pm_runtime_mark_last_busy(dev);
                pm_runtime_autosuspend(dev);
                return -EBUSY;
        }

        return 0;
}

int scsi_autopm_get_device(struct scsi_device *sdev)
{
        int err;

        err = pm_runtime_get_sync(&sdev->sdev_gendev);
        if (err < 0 && err != -EACCES)
                pm_runtime_put_sync(&sdev->sdev_gendev);
        else
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(scsi_autopm_get_device);

void scsi_autopm_put_device(struct scsi_device *sdev)
{
        pm_runtime_put_sync(&sdev->sdev_gendev);
}
EXPORT_SYMBOL_GPL(scsi_autopm_put_device);

void scsi_autopm_get_target(struct scsi_target *starget)
{
        pm_runtime_get_sync(&starget->dev);
}

void scsi_autopm_put_target(struct scsi_target *starget)
{
        pm_runtime_put_sync(&starget->dev);
}

int scsi_autopm_get_host(struct Scsi_Host *shost)
{
        int err;

        err = pm_runtime_get_sync(&shost->shost_gendev);
        if (err < 0 && err != -EACCES)
                pm_runtime_put_sync(&shost->shost_gendev);
        else
                err = 0;
        return err;
}

void scsi_autopm_put_host(struct Scsi_Host *shost)
{
        pm_runtime_put_sync(&shost->shost_gendev);
}

#else /* CONFIG_PM_RUNTIME */

#define scsi_runtime_suspend    NULL
#define scsi_runtime_resume     NULL
#define scsi_runtime_idle       NULL

#endif /* CONFIG_PM_RUNTIME */

const struct dev_pm_ops scsi_bus_pm_ops = {
        .prepare =              scsi_bus_prepare,
        .suspend =              scsi_bus_suspend,
        .resume =               scsi_bus_resume,
        .freeze =               scsi_bus_freeze,
        .thaw =                 scsi_bus_thaw,
        .poweroff =             scsi_bus_poweroff,
        .restore =              scsi_bus_restore,
        .runtime_suspend =      scsi_runtime_suspend,
        .runtime_resume =       scsi_runtime_resume,
        .runtime_idle =         scsi_runtime_idle,
};