/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/spinlock.h>
#include "iw_cxgbe.h"

static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
{
        u32 i;

        if (c4iw_id_table_alloc(&rdev->resource.qid_table,
            rdev->adap->vres.qp.start, rdev->adap->vres.qp.size,
            rdev->adap->vres.qp.size, 0)) {
                printf("%s: return ENOMEM\n", __func__);
                return -ENOMEM;
        }

        for (i = rdev->adap->vres.qp.start;
            i < rdev->adap->vres.qp.start + rdev->adap->vres.qp.size; i++)
                if (!(i & rdev->qpmask))
                        c4iw_id_free(&rdev->resource.qid_table, i);
        return 0;
}

/* nr_* must be a power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
        int err = 0;

        err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
            C4IW_ID_TABLE_F_RANDOM);
        if (err)
                goto tpt_err;
        err = c4iw_init_qid_table(rdev);
        if (err)
                goto qid_err;
        err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0, nr_pdid,
            1, 0);
        if (err)
                goto pdid_err;
        return 0;
pdid_err:
        c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
        c4iw_id_table_free(&rdev->resource.tpt_table);
tpt_err:
        return -ENOMEM;
}

/*
 * Returns 0 if no resource is available.
 */
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
{
        u32 entry;

        entry = c4iw_id_alloc(id_table);
        if (entry == (u32)(-1))
                return 0;
        return entry;
}

void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
        CTR2(KTR_IW_CXGBE, "%s entry 0x%x", __func__, entry);
        c4iw_id_free(id_table, entry);
}
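/*
 * QP ids are managed in aligned blocks of (qpmask + 1) ids, because
 * every id in a block maps to the same doorbell/GTS page; this is why
 * c4iw_init_qid_table() seeds the qid table with only the block-aligned
 * ids.  When an aligned id is pulled from the table, its sibling ids
 * are cached on the per-ucontext cqid/qpid free lists so that later
 * allocations reuse the same page.  For example, with qpmask == 3,
 * c4iw_get_cqid() returning cqid 8 also caches ids 9-11 on both free
 * lists, and id 8 itself on the qpid list.
 */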
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;
        u32 qid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->cqids)) {
                entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
                    entry);
                list_del(&entry->entry);
                qid = entry->qid;
                kfree(entry);
        } else {
                qid = c4iw_get_resource(&rdev->resource.qid_table);
                if (!qid)
                        goto out;
                mutex_lock(&rdev->stats.lock);
                rdev->stats.qid.cur += rdev->qpmask + 1;
                mutex_unlock(&rdev->stats.lock);
                /* Cache the rest of the block on the cqid free list. */
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->cqids);
                }

                /*
                 * Now put the same ids on the qp list since they all
                 * map to the same db/gts page.
                 */
                entry = kmalloc(sizeof *entry, GFP_KERNEL);
                if (!entry)
                        goto out;
                entry->qid = qid;
                list_add_tail(&entry->entry, &uctx->qpids);
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        CTR2(KTR_IW_CXGBE, "%s: qid 0x%x", __func__, qid);
        mutex_lock(&rdev->stats.lock);
        if (rdev->stats.qid.cur > rdev->stats.qid.max)
                rdev->stats.qid.max = rdev->stats.qid.cur;
        mutex_unlock(&rdev->stats.lock);
        return qid;
}

void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
    struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return;
        CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
        entry->qid = qid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->cqids);
        mutex_unlock(&uctx->lock);
}
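/*
 * Same scheme as c4iw_get_cqid()/c4iw_put_cqid() above, with the roles
 * of the per-ucontext qpid and cqid free lists swapped.
 */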
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;
        u32 qid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->qpids)) {
                entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
                    entry);
                list_del(&entry->entry);
                qid = entry->qid;
                kfree(entry);
        } else {
                qid = c4iw_get_resource(&rdev->resource.qid_table);
                if (!qid)
                        goto out;
                mutex_lock(&rdev->stats.lock);
                rdev->stats.qid.cur += rdev->qpmask + 1;
                mutex_unlock(&rdev->stats.lock);
                /* Cache the rest of the block on the qpid free list. */
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }

                /*
                 * Now put the same ids on the cq list since they all
                 * map to the same db/gts page.
                 */
                entry = kmalloc(sizeof *entry, GFP_KERNEL);
                if (!entry)
                        goto out;
                entry->qid = qid;
                list_add_tail(&entry->entry, &uctx->cqids);
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->cqids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
        mutex_lock(&rdev->stats.lock);
        if (rdev->stats.qid.cur > rdev->stats.qid.max)
                rdev->stats.qid.max = rdev->stats.qid.cur;
        mutex_unlock(&rdev->stats.lock);
        return qid;
}

void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
    struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return;
        CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
        entry->qid = qid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->qpids);
        mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
        c4iw_id_table_free(&rscp->tpt_table);
        c4iw_id_table_free(&rscp->qid_table);
        c4iw_id_table_free(&rscp->pdid_table);
}
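/*
 * The PBL and RQT pools below carve allocations out of vmem(9) arenas
 * that span the adapter's PBL and RQ memory regions.  PBL allocations
 * are rounded up to a 32-byte multiple (MIN_PBL_SHIFT).  RQT sizes are
 * passed in as entry counts; each entry is 64 bytes (hence the
 * size << 6 scaling) and allocations are rounded up to the 1KB minimum
 * (MIN_RQT_SHIFT).
 */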
/* PBL Memory Manager. */

#define MIN_PBL_SHIFT 5                 /* 32B == min PBL size (4 entries) */

u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr;

        /* vmem_xalloc() sets addr only on success; treat failure as 0. */
        if (vmem_xalloc(rdev->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
            4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_FIRSTFIT | M_NOWAIT,
            &addr) != 0)
                addr = 0;
        CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr, size);
        mutex_lock(&rdev->stats.lock);
        if (addr) {
                rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
                if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
                        rdev->stats.pbl.max = rdev->stats.pbl.cur;
        } else
                rdev->stats.pbl.fail++;
        mutex_unlock(&rdev->stats.lock);
        return (u32)addr;
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size);
        mutex_lock(&rdev->stats.lock);
        rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        vmem_xfree(rdev->pbl_arena, addr, roundup(size, (1 << MIN_PBL_SHIFT)));
}

int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
        rdev->pbl_arena = vmem_create("PBL_MEM_POOL",
            rdev->adap->vres.pbl.start, rdev->adap->vres.pbl.size,
            1, 0, M_FIRSTFIT | M_NOWAIT);
        if (!rdev->pbl_arena)
                return -ENOMEM;

        return 0;
}

void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
        vmem_destroy(rdev->pbl_arena);
}

/* RQT Memory Manager. */

#define MIN_RQT_SHIFT 10                /* 1KB == min RQT size (16 entries) */

u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr;

        /* vmem_xalloc() sets addr only on success; treat failure as 0. */
        if (vmem_xalloc(rdev->rqt_arena,
            roundup((size << 6), (1 << MIN_RQT_SHIFT)),
            4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_FIRSTFIT | M_NOWAIT,
            &addr) != 0)
                addr = 0;
        CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr,
            size << 6);
        if (!addr)
                printf("%s: Out of RQT memory\n",
                    device_get_nameunit(rdev->adap->dev));
        mutex_lock(&rdev->stats.lock);
        if (addr) {
                rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
                if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
                        rdev->stats.rqt.max = rdev->stats.rqt.cur;
        } else
                rdev->stats.rqt.fail++;
        mutex_unlock(&rdev->stats.lock);
        return (u32)addr;
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size << 6);
        mutex_lock(&rdev->stats.lock);
        rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        vmem_xfree(rdev->rqt_arena, addr,
            roundup((size << 6), (1 << MIN_RQT_SHIFT)));
}

int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
        rdev->rqt_arena = vmem_create("RQT_MEM_POOL",
            rdev->adap->vres.rq.start, rdev->adap->vres.rq.size,
            1, 0, M_FIRSTFIT | M_NOWAIT);
        if (!rdev->rqt_arena)
                return -ENOMEM;

        return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
        vmem_destroy(rdev->rqt_arena);
}
#endif