/* SPDX-License-Identifier: MIT */
/*
 * Description: UBLK_F_BATCH_IO buffer management
 */

#include "kublk.h"

/*
 * Return the address of the commit buffer with thread-wide index 'buf_idx',
 * or NULL if the index doesn't refer to one of this thread's commit buffers.
 */
static inline void *ublk_get_commit_buf(struct ublk_thread *t,
					unsigned short buf_idx)
{
	unsigned idx;

	if (buf_idx < t->commit_buf_start ||
	    buf_idx >= t->commit_buf_start + t->nr_commit_buf)
		return NULL;
	idx = buf_idx - t->commit_buf_start;
	return t->commit_buf + idx * t->commit_buf_size;
}

/*
 * Allocate one buffer for UBLK_U_IO_PREP_IO_CMDS or UBLK_U_IO_COMMIT_IO_CMDS.
 *
 * The buffer index is returned; UBLKS_T_COMMIT_BUF_INV_IDX means no buffer
 * is available.
 */
static inline unsigned short ublk_alloc_commit_buf(struct ublk_thread *t)
{
	int idx = allocator_get(&t->commit_buf_alloc);

	if (idx >= 0)
		return idx + t->commit_buf_start;
	return UBLKS_T_COMMIT_BUF_INV_IDX;
}

/*
 * Free one commit buffer which was used by UBLK_U_IO_PREP_IO_CMDS or
 * UBLK_U_IO_COMMIT_IO_CMDS.
 */
static inline void ublk_free_commit_buf(struct ublk_thread *t,
					unsigned short i)
{
	unsigned short idx = i - t->commit_buf_start;

	ublk_assert(idx < t->nr_commit_buf);
	ublk_assert(allocator_get_val(&t->commit_buf_alloc, idx) != 0);

	allocator_put(&t->commit_buf_alloc, idx);
}

static unsigned char ublk_commit_elem_buf_size(struct ublk_dev *dev)
{
	if (dev->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_USER_COPY |
				   UBLK_F_AUTO_BUF_REG))
		return 8;

	/* one extra 8 bytes for carrying the buffer address */
	return 16;
}

/* one commit buffer covers a whole queue, rounded up to page size */
static unsigned ublk_commit_buf_size(struct ublk_thread *t)
{
	struct ublk_dev *dev = t->dev;
	unsigned elem_size = ublk_commit_elem_buf_size(dev);
	unsigned int total = elem_size * dev->dev_info.queue_depth;
	unsigned int page_sz = getpagesize();

	return round_up(total, page_sz);
}

static void free_batch_commit_buf(struct ublk_thread *t)
{
	if (t->commit_buf) {
		unsigned buf_size = ublk_commit_buf_size(t);
		unsigned int total = buf_size * t->nr_commit_buf;

		munlock(t->commit_buf, total);
		free(t->commit_buf);
	}
	allocator_deinit(&t->commit_buf_alloc);
}

static int alloc_batch_commit_buf(struct ublk_thread *t)
{
	unsigned buf_size = ublk_commit_buf_size(t);
	unsigned int total = buf_size * t->nr_commit_buf;
	unsigned int page_sz = getpagesize();
	void *buf = NULL;
	int ret;

	allocator_init(&t->commit_buf_alloc, t->nr_commit_buf);

	t->commit_buf = NULL;
	ret = posix_memalign(&buf, page_sz, total);
	if (ret || !buf)
		goto fail;

	t->commit_buf = buf;

	/* lock commit buffer pages for fast access */
	if (mlock(t->commit_buf, total))
		ublk_err("%s: can't lock commit buffer %s\n", __func__,
			 strerror(errno));

	return 0;

fail:
	free_batch_commit_buf(t);
	return ret;
}

void ublk_batch_prepare(struct ublk_thread *t)
{
	/*
	 * We only handle a single device in this thread context.
	 *
	 * All queues have the same feature flags, so use queue 0's flags
	 * when calculating the uring_cmd flags.
	 *
	 * This isn't elegant, but it works so far.
	 */
	struct ublk_queue *q = &t->dev->q[0];

	t->commit_buf_elem_size = ublk_commit_elem_buf_size(t->dev);
	t->commit_buf_size = ublk_commit_buf_size(t);
	t->commit_buf_start = t->nr_bufs;
	t->nr_commit_buf = 2;
	t->nr_bufs += t->nr_commit_buf;

	t->cmd_flags = 0;
	if (ublk_queue_use_auto_zc(q)) {
		if (ublk_queue_auto_zc_fallback(q))
			t->cmd_flags |= UBLK_BATCH_F_AUTO_BUF_REG_FALLBACK;
	} else if (!ublk_queue_no_buf(q))
		t->cmd_flags |= UBLK_BATCH_F_HAS_BUF_ADDR;

	t->state |= UBLKS_T_BATCH_IO;

	ublk_log("%s: thread %d commit(nr_bufs %u, buf_size %u, start %u)\n",
		 __func__, t->idx,
		 t->nr_commit_buf, t->commit_buf_size,
		 t->commit_buf_start);
}

int ublk_batch_alloc_buf(struct ublk_thread *t)
{
	ublk_assert(t->nr_commit_buf < 16);
	return alloc_batch_commit_buf(t);
}

void ublk_batch_free_buf(struct ublk_thread *t)
{
	free_batch_commit_buf(t);
}
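
/*
 * A minimal usage sketch (added for illustration; not part of the original
 * file): how a caller is expected to drive one commit buffer through its
 * lifecycle with the helpers above. The function name below and the
 * submission step described in the comments are hypothetical; only the
 * alloc/get/free helpers and the struct fields come from this file.
 */
static inline void ublk_commit_buf_lifecycle_sketch(struct ublk_thread *t)
{
	unsigned short idx = ublk_alloc_commit_buf(t);
	void *buf;

	if (idx == UBLKS_T_COMMIT_BUF_INV_IDX)
		return;	/* all nr_commit_buf buffers are in flight */

	buf = ublk_get_commit_buf(t, idx);
	/*
	 * Fill 'buf' with up to queue_depth elements of
	 * t->commit_buf_elem_size bytes each, then submit one
	 * UBLK_U_IO_COMMIT_IO_CMDS uring_cmd referencing index 'idx'.
	 */
	(void)buf;

	/* once the uring_cmd completes, return the buffer to the pool */
	ublk_free_commit_buf(t, idx);
}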