// SPDX-License-Identifier: GPL-2.0-only
/* Object lifetime handling and tracing.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include "internal.h"

/*
 * Allocate an I/O request and initialise it.
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin)
{
	static atomic_t debug_ids;
	struct inode *inode = file ? file_inode(file) : mapping->host;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct netfs_io_request *rreq;
	mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
	struct kmem_cache *cache = mempool->pool_data;
	int ret;

	for (;;) {
		rreq = mempool_alloc(mempool, GFP_KERNEL);
		if (rreq)
			break;
		msleep(10);
	}

	memset(rreq, 0, kmem_cache_size(cache));
	rreq->start = start;
	rreq->len = len;
	rreq->origin = origin;
	rreq->netfs_ops = ctx->ops;
	rreq->mapping = mapping;
	rreq->inode = inode;
	rreq->i_size = i_size_read(inode);
	rreq->debug_id = atomic_inc_return(&debug_ids);
	rreq->wsize = INT_MAX;
	rreq->io_streams[0].sreq_max_len = ULONG_MAX;
	rreq->io_streams[0].sreq_max_segs = 0;
	spin_lock_init(&rreq->lock);
	INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
	INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
	init_waitqueue_head(&rreq->waitq);
	refcount_set(&rreq->ref, 1);

	if (origin == NETFS_READAHEAD ||
	    origin == NETFS_READPAGE ||
	    origin == NETFS_READ_GAPS ||
	    origin == NETFS_READ_SINGLE ||
	    origin == NETFS_READ_FOR_WRITE ||
	    origin == NETFS_DIO_READ) {
		INIT_WORK(&rreq->work, netfs_read_collection_worker);
		rreq->io_streams[0].avail = true;
	} else {
		INIT_WORK(&rreq->work, netfs_write_collection_worker);
	}

	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	if (file && file->f_flags & O_NONBLOCK)
		__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
	if (rreq->netfs_ops->init_request) {
		ret = rreq->netfs_ops->init_request(rreq, file);
		if (ret < 0) {
			mempool_free(rreq, mempool);
			return ERR_PTR(ret);
		}
	}

	atomic_inc(&ctx->io_count);
	trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
	netfs_proc_add_rreq(rreq);
	netfs_stat(&netfs_n_rh_rreq);
	return rreq;
}

/*
 * Get a ref on a request.
 */
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
	int r;

	__refcount_inc(&rreq->ref, &r);
	trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
}

/*
 * Unhook and release all the subrequests queued on each of a request's
 * I/O streams.
 */
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream;
	int s;

	for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
		stream = &rreq->io_streams[s];
		while (!list_empty(&stream->subrequests)) {
			subreq = list_first_entry(&stream->subrequests,
						  struct netfs_io_subrequest, rreq_link);
			list_del(&subreq->rreq_link);
			netfs_put_subrequest(subreq, was_async,
					     netfs_sreq_trace_put_clear);
		}
	}
}

static void netfs_free_request_rcu(struct rcu_head *rcu)
{
	struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu);

	mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
	netfs_stat_d(&netfs_n_rh_rreq);
}
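
/*
 * Tear down a request once the last ref is gone.  This runs in process
 * context (directly from netfs_put_request() or, on the async path, from
 * the unbound workqueue) since the netfs and cache cleanup hooks may
 * sleep; the structure itself is then returned to its mempool from an
 * RCU callback.
 */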
static void netfs_free_request(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);
	struct netfs_inode *ictx = netfs_inode(rreq->inode);
	unsigned int i;

	trace_netfs_rreq(rreq, netfs_rreq_trace_free);
	netfs_proc_del_rreq(rreq);
	netfs_clear_subrequests(rreq, false);
	if (rreq->netfs_ops->free_request)
		rreq->netfs_ops->free_request(rreq);
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	if (rreq->direct_bv) {
		for (i = 0; i < rreq->direct_bv_count; i++) {
			if (rreq->direct_bv[i].bv_page) {
				if (rreq->direct_bv_unpin)
					unpin_user_page(rreq->direct_bv[i].bv_page);
			}
		}
		kvfree(rreq->direct_bv);
	}
	rolling_buffer_clear(&rreq->buffer);

	if (atomic_dec_and_test(&ictx->io_count))
		wake_up_var(&ictx->io_count);
	call_rcu(&rreq->rcu, netfs_free_request_rcu);
}

/*
 * Drop a ref on a request, freeing it when the count reaches zero.  If
 * the final put happens in an async context, freeing is punted to a
 * workqueue as the cleanup may need to sleep.
 */
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
		       enum netfs_rreq_ref_trace what)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (rreq) {
		debug_id = rreq->debug_id;
		dead = __refcount_dec_and_test(&rreq->ref, &r);
		trace_netfs_rreq_ref(debug_id, r - 1, what);
		if (dead) {
			if (was_async) {
				rreq->work.func = netfs_free_request;
				if (!queue_work(system_unbound_wq, &rreq->work))
					WARN_ON(1);
			} else {
				netfs_free_request(&rreq->work);
			}
		}
	}
}

/*
 * Allocate an I/O subrequest and partially initialise it.
 */
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool;
	struct kmem_cache *cache = mempool->pool_data;

	for (;;) {
		subreq = mempool_alloc(mempool, GFP_KERNEL);
		if (subreq)
			break;
		msleep(10);
	}

	memset(subreq, 0, kmem_cache_size(cache));
	INIT_WORK(&subreq->work, NULL);
	INIT_LIST_HEAD(&subreq->rreq_link);
	refcount_set(&subreq->ref, 2);
	subreq->rreq = rreq;
	subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
	netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
	netfs_stat(&netfs_n_rh_sreq);
	return subreq;
}

/*
 * Get a ref on a subrequest.
 */
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what)
{
	int r;

	__refcount_inc(&subreq->ref, &r);
	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
			     what);
}

static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
				  bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;

	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
	if (rreq->netfs_ops->free_subrequest)
		rreq->netfs_ops->free_subrequest(subreq);
	mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
	netfs_stat_d(&netfs_n_rh_sreq);
	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
}
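
/*
 * Drop a ref on a subrequest, freeing it (and releasing its ref on the
 * parent request) when the count reaches zero.  The debug values are
 * sampled into locals before the ref is dropped because, once our ref is
 * gone, a concurrent put may free the subrequest before the tracepoint
 * fires.
 */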
void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
			  enum netfs_sreq_ref_trace what)
{
	unsigned int debug_index = subreq->debug_index;
	unsigned int debug_id = subreq->rreq->debug_id;
	bool dead;
	int r;

	dead = __refcount_dec_and_test(&subreq->ref, &r);
	trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
	if (dead)
		netfs_free_subrequest(subreq, was_async);
}
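
#if 0	/* Illustrative sketch only; not compiled.  It shows how the
	 * lifetime helpers above are intended to nest: a request holds
	 * one ref for its creator, and each subrequest is born with two
	 * refs (submitter and collector) plus a ref on its parent
	 * request.  The function name is hypothetical and the trace tags
	 * are reused from this file purely for illustration; a real
	 * caller would pick its own.
	 */
static void netfs_lifetime_example(struct address_space *mapping,
				   struct file *file)
{
	struct netfs_io_request *rreq;
	struct netfs_io_subrequest *subreq;

	rreq = netfs_alloc_request(mapping, file, 0, PAGE_SIZE, NETFS_READPAGE);
	if (IS_ERR(rreq))
		return;

	subreq = netfs_alloc_subrequest(rreq);

	/* ... submit subreq and collect its results ... */

	/* Drop the submitter's and the collector's refs; the second put
	 * frees the subrequest and releases its ref on rreq.
	 */
	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_clear);
	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_clear);

	/* Drop the creator's ref; rreq is now torn down via RCU. */
	netfs_put_request(rreq, false, netfs_rreq_trace_put_subreq);
}
#endif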