/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/genalloc.h>
#include "iw_cxgb4.h"

#define RANDOM_SIZE 16

static int __c4iw_init_resource_fifo(struct kfifo *fifo,
				     spinlock_t *fifo_lock,
				     u32 nr, u32 skip_low,
				     u32 skip_high,
				     int random)
{
	u32 i, j, entry = 0, idx;
	u32 random_bytes;
	u32 rarray[16];
	spin_lock_init(fifo_lock);

	if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Queue placeholder zeroes for the reserved low/high IDs; they
	 * are drained again below so the fifo ends up holding only the
	 * usable IDs.
	 */
	for (i = 0; i < skip_low + skip_high; i++)
		kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
	if (random) {
		j = 0;
		random_bytes = random32();
		/*
		 * Crude shuffle: keep a 16-entry window of pending IDs
		 * and emit a randomly chosen one as each new ID enters.
		 */
		for (i = 0; i < RANDOM_SIZE; i++)
			rarray[i] = i + skip_low;
		for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
			if (j >= RANDOM_SIZE) {
				j = 0;
				random_bytes = random32();
			}
			idx = (random_bytes >> (j * 2)) & 0xF;
			kfifo_in(fifo,
				 (unsigned char *) &rarray[idx],
				 sizeof(u32));
			rarray[idx] = i;
			j++;
		}
		for (i = 0; i < RANDOM_SIZE; i++)
			kfifo_in(fifo,
				 (unsigned char *) &rarray[i],
				 sizeof(u32));
	} else
		for (i = skip_low; i < nr - skip_high; i++)
			kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));

	/* Drain the placeholder entries queued above. */
	for (i = 0; i < skip_low + skip_high; i++)
		if (kfifo_out_locked(fifo, (unsigned char *) &entry,
				     sizeof(u32), fifo_lock))
			break;
	return 0;
}

static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t *fifo_lock,
				   u32 nr, u32 skip_low, u32 skip_high)
{
	return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
					 skip_high, 0);
}

static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
					  spinlock_t *fifo_lock,
					  u32 nr, u32 skip_low, u32 skip_high)
{
	return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
					 skip_high, 1);
}
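/*
 * Only queue IDs aligned to (qpmask + 1) are stocked in the fifo; the
 * IDs in between are carved out on demand by c4iw_get_cqid() and
 * c4iw_get_qpid() below, since every ID in an aligned block maps to the
 * same db/gts page.
 */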
static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
{
	u32 i;

	spin_lock_init(&rdev->resource.qid_fifo_lock);

	if (kfifo_alloc(&rdev->resource.qid_fifo, rdev->lldi.vr->qp.size *
			sizeof(u32), GFP_KERNEL))
		return -ENOMEM;

	for (i = rdev->lldi.vr->qp.start;
	     i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
		if (!(i & rdev->qpmask))
			kfifo_in(&rdev->resource.qid_fifo,
				 (unsigned char *) &i, sizeof(u32));
	return 0;
}

/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
	int err = 0;
	err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
					     &rdev->resource.tpt_fifo_lock,
					     nr_tpt, 1, 0);
	if (err)
		goto tpt_err;
	err = c4iw_init_qid_fifo(rdev);
	if (err)
		goto qid_err;
	err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
				      &rdev->resource.pdid_fifo_lock,
				      nr_pdid, 1, 0);
	if (err)
		goto pdid_err;
	return 0;
pdid_err:
	kfifo_free(&rdev->resource.qid_fifo);
qid_err:
	kfifo_free(&rdev->resource.tpt_fifo);
tpt_err:
	return -ENOMEM;
}

/*
 * returns 0 if no resource available
 */
u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
{
	u32 entry;
	if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
		return entry;
	else
		return 0;
}

void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
{
	PDBG("%s entry 0x%x\n", __func__, entry);
	kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
}

u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_fifo,
					&rdev->resource.qid_fifo_lock);
		if (!qid)
			goto out;
		/* Bank the rest of this aligned block for later cq use. */
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}

		/*
		 * now put the same ids on the qp list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->qpids);
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	return qid;
}
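/*
 * Freed queue IDs are returned to the per-context cache rather than the
 * global fifo, so later allocations in the same context reuse the db/gts
 * pages the context already owns.
 */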
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qid 0x%x\n", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->cqids);
	mutex_unlock(&uctx->lock);
}

u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_fifo,
					&rdev->resource.qid_fifo_lock);
		if (!qid)
			goto out;
		/* Bank the rest of this aligned block for later qp use. */
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}

		/*
		 * now put the same ids on the cq list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->cqids);
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	return qid;
}

void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qid 0x%x\n", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
	kfifo_free(&rscp->tpt_fifo);
	kfifo_free(&rscp->qid_fifo);
	kfifo_free(&rscp->pdid_fifo);
}
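/*
 * Both pools below are populated the same way: try to hand the whole
 * remaining range to gen_pool_add() as one chunk and, if the allocator
 * refuses, keep halving the chunk size until the range is added (or a
 * warning is logged once the chunk floor is reached).
 */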
/*
 * PBL Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_PBL_SHIFT 8			/* 256B == min PBL size (32 entries) */

u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
	return (u32)addr;
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
}

int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
	unsigned pbl_start, pbl_chunk, pbl_top;

	rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
	if (!rdev->pbl_pool)
		return -ENOMEM;

	pbl_start = rdev->lldi.vr->pbl.start;
	pbl_chunk = rdev->lldi.vr->pbl.size;
	pbl_top = pbl_start + pbl_chunk;

	while (pbl_start < pbl_top) {
		pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
		if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
			PDBG("%s failed to add PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all PBL chunks (%x/%x)\n",
				       pbl_start,
				       pbl_top - pbl_start);
				return 0;
			}
			pbl_chunk >>= 1;
		} else {
			PDBG("%s added PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			pbl_start += pbl_chunk;
		}
	}

	return 0;
}

void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->pbl_pool);
}

/*
 * RQT Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_RQT_SHIFT 10		/* 1KB == min RQT size (16 entries) */

u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
	/* RQT entries are 64B, so convert the entry count to bytes. */
	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
	return (u32)addr;
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
}

int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
	unsigned rqt_start, rqt_chunk, rqt_top;

	rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
	if (!rdev->rqt_pool)
		return -ENOMEM;

	rqt_start = rdev->lldi.vr->rq.start;
	rqt_chunk = rdev->lldi.vr->rq.size;
	rqt_top = rqt_start + rqt_chunk;

	while (rqt_start < rqt_top) {
		rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
		if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
			PDBG("%s failed to add RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all RQT chunks (%x/%x)\n",
				       rqt_start, rqt_top - rqt_start);
				return 0;
			}
			rqt_chunk >>= 1;
		} else {
			PDBG("%s added RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			rqt_start += rqt_chunk;
		}
	}
	return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->rqt_pool);
}