/*-
 * Copyright (c) 2014 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2003, 2005 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifndef NSFBUFS
#define NSFBUFS		(512 + maxusers * 16)
#endif

static int nsfbufs;
static int nsfbufspeak;
static int nsfbufsused;

SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
    "Number of sendfile(2) sf_bufs at peak usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
    "Number of sendfile(2) sf_bufs in use");

static void sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;
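
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * typical consumer maps a wired page through an sf_buf, copies via the
 * per-buffer KVA, and then drops its reference.  This assumes the
 * standard sf_buf(9) interface, i.e. the sf_buf_kva() accessor and the
 * SFB_DEFAULT flag from sys/sf_buf.h:
 *
 *	struct sf_buf *sf;
 *
 *	sf = sf_buf_alloc(m, SFB_DEFAULT);	// may sleep for a free buf
 *	bcopy(src, (void *)sf_buf_kva(sf), PAGE_SIZE);
 *	sf_buf_free(sf);
 */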

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

#ifdef SFBUF_OPTIONAL_DIRECT_MAP
	if (SFBUF_OPTIONAL_DIRECT_MAP)
		return;
#endif

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}

/*
 * Get an sf_buf from the freelist.  May block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

#ifdef SFBUF_OPTIONAL_DIRECT_MAP
	if (SFBUF_OPTIONAL_DIRECT_MAP)
		return ((struct sf_buf *)m);
#endif

	KASSERT(curthread->td_pinned > 0 || (flags & SFB_CPUPRIVATE) == 0,
	    ("sf_buf_alloc(SFB_CPUPRIVATE): curthread not pinned"));
	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
#if defined(SMP) && defined(SFBUF_CPUSET)
			sf_buf_shootdown(sf, flags);
#endif
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		SFSTAT_INC(sf_allocwait);
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	sf_buf_map(sf, flags);
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}
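
/*
 * Caller-side sketch (illustrative only): with SFB_NOWAIT the
 * allocation above never sleeps and instead falls through to "done"
 * with sf == NULL, so a non-blocking consumer must check the return
 * value:
 *
 *	sf = sf_buf_alloc(m, SFB_NOWAIT);
 *	if (sf == NULL)
 *		return (EAGAIN);	// pool exhausted; try again later
 */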

/*
 * Remove a reference from the given sf_buf, adding it to the free
 * list when its reference count reaches zero.  A freed sf_buf,
 * however, still retains its virtual-to-physical mapping until it is
 * recycled or reactivated by sf_buf_alloc(9).
 */
void
sf_buf_free(struct sf_buf *sf)
{

#ifdef SFBUF_OPTIONAL_DIRECT_MAP
	if (SFBUF_OPTIONAL_DIRECT_MAP)
		return;
#endif

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		if (sf_buf_unmap(sf)) {
			sf->m = NULL;
			LIST_REMOVE(sf, list_entry);
		}
		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}

void
sf_buf_ref(struct sf_buf *sf)
{

#ifdef SFBUF_OPTIONAL_DIRECT_MAP
	if (SFBUF_OPTIONAL_DIRECT_MAP)
		return;
#endif

	mtx_lock(&sf_buf_lock);
	KASSERT(sf->ref_count > 0, ("%s: sf %p not allocated", __func__, sf));
	sf->ref_count++;
	mtx_unlock(&sf_buf_lock);
}

#ifdef SFBUF_PROCESS_PAGE
/*
 * Run a callback function on the sf_buf that holds a given page.
 */
boolean_t
sf_buf_process_page(vm_page_t m, void (*cb)(struct sf_buf *))
{
	struct sf_head *hash_list;
	struct sf_buf *sf;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			cb(sf);
			mtx_unlock(&sf_buf_lock);
			return (TRUE);
		}
	}
	mtx_unlock(&sf_buf_lock);
	return (FALSE);
}
#endif /* SFBUF_PROCESS_PAGE */
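
/*
 * Illustrative sketch (not part of the original file): machine-dependent
 * code can use sf_buf_process_page() to run a maintenance hook against
 * the sf_buf that currently maps a page, e.g. to write back a virtually
 * indexed data cache before the page is reused.  The flush routine named
 * below is hypothetical:
 *
 *	static void
 *	sf_flush_cb(struct sf_buf *sf)
 *	{
 *		cpu_dcache_wb_range(sf_buf_kva(sf), PAGE_SIZE);
 *	}
 *
 *	if (sf_buf_process_page(m, sf_flush_cb))
 *		;	// the page was mapped through an sf_buf
 */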