Lines matching full:dc (identifier references in the bcache cached-device sysfs handlers, drivers/md/bcache/sysfs.c)

173 struct cached_dev *dc = container_of(kobj, struct cached_dev, in SHOW() local
176 int wb = dc->writeback_running; in SHOW()
178 #define var(stat) (dc->stat) in SHOW()
183 BDEV_CACHE_MODE(&dc->sb)); in SHOW()
188 dc->cache_readahead_policy); in SHOW()
193 dc->stop_when_cache_set_failed); in SHOW()
196 sysfs_printf(data_csum, "%i", dc->disk.data_csum); in SHOW()
205 wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0); in SHOW()
206 sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors)); in SHOW()
207 sysfs_printf(io_error_limit, "%i", dc->error_limit); in SHOW()
208 sysfs_printf(io_disable, "%i", dc->io_disable); in SHOW()
231 wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 in SHOW()
233 bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9); in SHOW()
234 bch_hprint(target, dc->writeback_rate_target << 9); in SHOW()
236 wb ? dc->writeback_rate_proportional << 9 : 0); in SHOW()
238 wb ? dc->writeback_rate_integral_scaled << 9 : 0); in SHOW()
239 bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0); in SHOW()
240 next_io = wb ? div64_s64(dc->writeback_rate.next-local_clock(), in SHOW()
256 bcache_dev_sectors_dirty(&dc->disk) << 9); in SHOW()
258 sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9); in SHOW()
263 sysfs_print(running, atomic_read(&dc->running)); in SHOW()
264 sysfs_print(state, states[BDEV_STATE(&dc->sb)]); in SHOW()
267 memcpy(buf, dc->sb.label, SB_LABEL_SIZE); in SHOW()
274 snprintf(buf, BDEVNAME_SIZE + 1, "%pg", dc->bdev); in SHOW()
281 snprintf(buf, 36+1, "%pU", dc->sb.uuid); in SHOW()
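
The matches above are the read side: the SHOW() handler for a cached device recovers dc from the kobject embedded in dc->disk with container_of() (line 173) and then formats individual fields into the sysfs buffer with the sysfs_print*/sysfs_hprint helpers. Below is a minimal userspace sketch of that container_of pattern; the struct layout and the show_io_error_limit() helper are simplified stand-ins for illustration, not the real struct cached_dev or the SHOW() macro expansion.

/*
 * Minimal userspace sketch of the pattern behind the SHOW() matches above:
 * the sysfs kobject is embedded inside the device struct, so the handler
 * recovers `dc` with container_of() and formats one field into `buf`.
 * Struct layout and field names are simplified stand-ins.
 */
#include <stddef.h>
#include <stdio.h>
#include <sys/types.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kobject { int dummy; };

struct bcache_device {			/* stand-in for dc->disk */
	struct kobject kobj;
	unsigned int stripe_size;
};

struct cached_dev {			/* heavily trimmed stand-in */
	struct bcache_device disk;
	int io_errors;
	unsigned int error_limit;
};

/* rough shape of a fn##_show() handler generated by the SHOW() macro */
static ssize_t show_io_error_limit(struct kobject *kobj, char *buf, size_t len)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	return snprintf(buf, len, "%u\n", dc->error_limit);
}

int main(void)
{
	struct cached_dev dc = { .error_limit = 96, .io_errors = 0 };
	char buf[32];

	show_io_error_limit(&dc.disk.kobj, buf, sizeof(buf));
	printf("io_error_limit -> %s", buf);
	return 0;
}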
293 struct cached_dev *dc = container_of(kobj, struct cached_dev, in STORE() local
303 #define d_strtoul(var) sysfs_strtoul(var, dc->var) in STORE()
304 #define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX) in STORE()
305 #define d_strtoi_h(var) sysfs_hatoi(var, dc->var) in STORE()
307 sysfs_strtoul(data_csum, dc->disk.data_csum); in STORE()
309 sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test); in STORE()
310 sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata); in STORE()
311 sysfs_strtoul_bool(writeback_running, dc->writeback_running); in STORE()
312 sysfs_strtoul_bool(writeback_consider_fragment, dc->writeback_consider_fragment); in STORE()
313 sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX); in STORE()
315 sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, in STORE()
320 long int v = atomic_long_read(&dc->writeback_rate.rate); in STORE()
325 atomic_long_set(&dc->writeback_rate.rate, v); in STORE()
333 dc->writeback_rate_update_seconds, in STORE()
336 dc->writeback_rate_i_term_inverse, in STORE()
339 dc->writeback_rate_p_term_inverse, in STORE()
342 dc->writeback_rate_fp_term_low, in STORE()
343 1, dc->writeback_rate_fp_term_mid - 1); in STORE()
345 dc->writeback_rate_fp_term_mid, in STORE()
346 dc->writeback_rate_fp_term_low + 1, in STORE()
347 dc->writeback_rate_fp_term_high - 1); in STORE()
349 dc->writeback_rate_fp_term_high, in STORE()
350 dc->writeback_rate_fp_term_mid + 1, UINT_MAX); in STORE()
352 dc->writeback_rate_minimum, in STORE()
355 sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX); in STORE()
360 dc->io_disable = v ? 1 : 0; in STORE()
364 dc->sequential_cutoff, in STORE()
368 bch_cache_accounting_clear(&dc->accounting); in STORE()
372 v = bch_cached_dev_run(dc); in STORE()
382 if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) { in STORE()
383 SET_BDEV_CACHE_MODE(&dc->sb, v); in STORE()
384 bch_write_bdev_super(dc, NULL); in STORE()
393 if ((unsigned int) v != dc->cache_readahead_policy) in STORE()
394 dc->cache_readahead_policy = v; in STORE()
402 dc->stop_when_cache_set_failed = v; in STORE()
408 memcpy(dc->sb.label, buf, size); in STORE()
410 dc->sb.label[size] = '\0'; in STORE()
411 if (size && dc->sb.label[size - 1] == '\n') in STORE()
412 dc->sb.label[size - 1] = '\0'; in STORE()
413 bch_write_bdev_super(dc, NULL); in STORE()
414 if (dc->disk.c) { in STORE()
415 memcpy(dc->disk.c->uuids[dc->disk.id].label, in STORE()
417 bch_uuid_write(dc->disk.c); in STORE()
423 add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid); in STORE()
425 kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj, in STORE()
439 v = bch_cached_dev_attach(dc, c, set_uuid); in STORE()
448 if (attr == &sysfs_detach && dc->disk.c) in STORE()
449 bch_cached_dev_detach(dc); in STORE()
452 bcache_device_stop(&dc->disk); in STORE()
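
The first STORE() block is the write side of the same attributes: the string written to a sysfs file is parsed and clamped into the corresponding dc field through the d_strtoul()/d_strtoi_h()/sysfs_strtoul_clamp() wrappers (lines 303-315), and a few attributes trigger follow-up work such as bch_write_bdev_super(), bch_cached_dev_attach() or bcache_device_stop(). The sketch below approximates only the parse-and-clamp pattern in plain userspace C; strtoul_clamp() and its arguments are illustrative, not the actual macros from sysfs.h.

/*
 * Userspace approximation of the parse-and-clamp pattern behind
 * d_strtoul()/sysfs_strtoul_clamp() in the STORE() matches above.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* parse `buf` as an unsigned integer and clamp it into [lo, hi];
 * returns 0 on success, -EINVAL on a malformed string */
static int strtoul_clamp(const char *buf, unsigned int *out,
			 unsigned long lo, unsigned long hi)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(buf, &end, 10);
	if (errno || end == buf || (*end && *end != '\n'))
		return -EINVAL;

	if (v < lo)
		v = lo;
	if (v > hi)
		v = hi;
	*out = v;
	return 0;
}

int main(void)
{
	unsigned int writeback_percent = 10;	/* stands in for dc->writeback_percent */

	/* a write of "150" would be clamped to the upper bound */
	if (!strtoul_clamp("150\n", &writeback_percent, 0, 100))
		printf("writeback_percent = %u\n", writeback_percent);
	return 0;
}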
459 struct cached_dev *dc = container_of(kobj, struct cached_dev, in STORE() local
470 /* dc->writeback_running changed in __cached_dev_store() */ in STORE()
471 if (IS_ERR_OR_NULL(dc->writeback_thread)) { in STORE()
476 if (dc->writeback_running) { in STORE()
477 dc->writeback_running = false; in STORE()
479 dc->disk.disk->disk_name); in STORE()
483 * writeback kthread will check if dc->writeback_running in STORE()
486 bch_writeback_queue(dc); in STORE()
494 if ((dc->disk.c != NULL) && in STORE()
495 (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))) in STORE()
496 schedule_delayed_work(&dc->writeback_rate_update, in STORE()
497 dc->writeback_rate_update_seconds * HZ); in STORE()
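
The second STORE() block is the outer handler: clearing writeback_running while the writeback kthread exists flips the flag and queues the thread via bch_writeback_queue() so it can notice the change itself, and rate-related writes (re)arm the writeback_rate_update delayed work when it is not already pending. The userspace sketch below shows only the "clear the flag under a lock, then wake the worker so it re-checks" part of that pattern; the thread, mutex and flag are illustrative stand-ins (the real kthread parks rather than exiting).

/*
 * "Flip the flag, then wake the worker so it re-checks" pattern,
 * mirroring dc->writeback_running plus bch_writeback_queue() above.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static bool writeback_running = true;

static void *writeback_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (writeback_running) {
		/* the real thread would flush dirty data here, then sleep */
		pthread_cond_wait(&wake, &lock);
	}
	pthread_mutex_unlock(&lock);
	printf("worker saw writeback_running == false, stopping\n");
	return NULL;
}

/* analogue of the sysfs store path: clear the flag, then wake the worker */
static void stop_writeback(void)
{
	pthread_mutex_lock(&lock);
	writeback_running = false;
	pthread_cond_signal(&wake);	/* plays the role of bch_writeback_queue() */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, writeback_thread, NULL);
	sleep(1);
	stop_writeback();
	pthread_join(tid, NULL);
	return 0;
}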