/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/spinlock.h>
#include "iw_cxgbe.h"

static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
{
	u32 i;

	if (c4iw_id_table_alloc(&rdev->resource.qid_table,
				rdev->adap->vres.qp.start,
				rdev->adap->vres.qp.size,
				rdev->adap->vres.qp.size, 0)) {
		printf("%s: return ENOMEM\n", __func__);
		return -ENOMEM;
	}

	for (i = rdev->adap->vres.qp.start;
	     i < rdev->adap->vres.qp.start + rdev->adap->vres.qp.size; i++)
		if (!(i & rdev->qpmask))
			c4iw_id_free(&rdev->resource.qid_table, i);
	return 0;
}

/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
	int err = 0;
	err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
	    C4IW_ID_TABLE_F_RANDOM);
	if (err)
		goto tpt_err;
	err = c4iw_init_qid_table(rdev);
	if (err)
		goto qid_err;
	err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
	    nr_pdid, 1, 0);
	if (err)
		goto pdid_err;
	return 0;
pdid_err:
	c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
	c4iw_id_table_free(&rdev->resource.tpt_table);
tpt_err:
	return -ENOMEM;
}

/*
 * returns 0 if no resource available
 */
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
{
	u32 entry;
	entry = c4iw_id_alloc(id_table);
	if (entry == (u32)(-1)) {
		return 0;
	}
	return entry;
}

void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
	CTR2(KTR_IW_CXGBE, "%s entry 0x%x", __func__, entry);
	c4iw_id_free(id_table, entry);
}
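/*
 * Queue id allocation: the global qid_table only ever holds ids that
 * are aligned to (qpmask + 1), because c4iw_init_qid_table() frees only
 * ids with the qpmask bits clear.  When c4iw_get_cqid() or
 * c4iw_get_qpid() pulls a fresh id from that table, it also queues
 * qid+1 .. qid+qpmask on the per-ucontext cqids and qpids free lists,
 * since every id in that block maps to the same db/gts page.  For
 * example, assuming qpmask == 3, allocating qid 8 also makes 9, 10 and
 * 11 available on the ucontext lists and bumps stats.qid.cur by 4.
 */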
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid)
			goto out;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}

		/*
		 * now put the same ids on the qp list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->qpids);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	CTR2(KTR_IW_CXGBE, "%s: qid 0x%x", __func__, qid);
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}

void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->cqids);
	mutex_unlock(&uctx->lock);
}
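/*
 * c4iw_get_qpid() mirrors c4iw_get_cqid() with the roles of the cqids
 * and qpids lists swapped: it prefers an id recycled on uctx->qpids and
 * only falls back to the global qid_table when that list is empty.
 * Note that c4iw_put_qpid() and c4iw_put_cqid() only push the id back
 * onto the per-ucontext list; nothing in this file returns a qid block
 * to rdev->resource.qid_table.
 */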
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid)
			goto out;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}

		/*
		 * now put the same ids on the cq list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->cqids);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}

void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
	c4iw_id_table_free(&rscp->tpt_table);
	c4iw_id_table_free(&rscp->qid_table);
	c4iw_id_table_free(&rscp->pdid_table);
}
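/*
 * The PBL and RQT managers below both sit on top of a Linux-style
 * gen_pool covering the adapter's vres.pbl and vres.rq address ranges.
 * c4iw_pblpool_alloc() deals directly in bytes with a 256B minimum
 * order (MIN_PBL_SHIFT), while c4iw_rqtpool_alloc() takes a size in
 * RQT entries and converts it to bytes with size << 6, i.e. 64 bytes
 * per entry (matching the MIN_RQT_SHIFT comment: 1KB == 16 entries).
 * For example, a hypothetical c4iw_rqtpool_alloc(rdev, 16) request asks
 * the pool for 16 << 6 = 1024 bytes and charges stats.rqt.cur with that
 * amount rounded up to 1 << MIN_RQT_SHIFT.
 */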
/* PBL Memory Manager. Uses Linux generic allocator. */

#define MIN_PBL_SHIFT 8			/* 256B == min PBL size (32 entries) */

u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr, size);
	mutex_lock(&rdev->stats.lock);
	if (addr) {
		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
			rdev->stats.pbl.max = rdev->stats.pbl.cur;
	} else
		rdev->stats.pbl.fail++;
	mutex_unlock(&rdev->stats.lock);
	return (u32)addr;
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
}

int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
	rdev->pbl_pool = gen_pool_create(rdev->adap->vres.pbl.start,
					 MIN_PBL_SHIFT,
					 rdev->adap->vres.pbl.size);
	if (!rdev->pbl_pool)
		return -ENOMEM;

	return 0;
}

void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->pbl_pool);
}

/* RQT Memory Manager. Uses Linux generic allocator. */

#define MIN_RQT_SHIFT 10		/* 1KB == min RQT size (16 entries) */

u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr,
	    size << 6);
	if (!addr)
		printf("%s: Out of RQT memory\n",
		    device_get_nameunit(rdev->adap->dev));
	mutex_lock(&rdev->stats.lock);
	if (addr) {
		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
			rdev->stats.rqt.max = rdev->stats.rqt.cur;
	} else
		rdev->stats.rqt.fail++;
	mutex_unlock(&rdev->stats.lock);
	return (u32)addr;
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size << 6);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
}

int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
	rdev->rqt_pool = gen_pool_create(rdev->adap->vres.rq.start,
					 MIN_RQT_SHIFT,
					 rdev->adap->vres.rq.size);
	if (!rdev->rqt_pool)
		return -ENOMEM;

	return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->rqt_pool);
}
#endif