/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */
#include <sys/cdefs.h>
#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/spinlock.h>
#include "iw_cxgbe.h"

static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
{
	u32 i;

	if (c4iw_id_table_alloc(&rdev->resource.qid_table,
				rdev->adap->vres.qp.start,
				rdev->adap->vres.qp.size,
				rdev->adap->vres.qp.size, 0)) {
		printf("%s: return ENOMEM\n", __func__);
		return -ENOMEM;
	}

	for (i = rdev->adap->vres.qp.start;
	     i < rdev->adap->vres.qp.start + rdev->adap->vres.qp.size; i++)
		if (!(i & rdev->qpmask))
			c4iw_id_free(&rdev->resource.qid_table, i);
	return 0;
}

/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_pdid)
{
	int err = 0;

	err = c4iw_init_qid_table(rdev);
	if (err)
		goto qid_err;
	err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
				  nr_pdid, 1, 0);
	if (err)
		goto pdid_err;
	return 0;
pdid_err:
	c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
	return -ENOMEM;
}

/*
 * returns 0 if no resource available
 */
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
{
	u32 entry;

	entry = c4iw_id_alloc(id_table);
	if (entry == (u32)(-1)) {
		return 0;
	}
	return entry;
}

void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
	CTR2(KTR_IW_CXGBE, "%s entry 0x%x", __func__, entry);
	c4iw_id_free(id_table, entry);
}
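/*
 * QP and CQ ids share one hardware qid space and are handed out in
 * aligned blocks of (qpmask + 1): only block-aligned ids are seeded
 * into the qid table above, and when a block leader is allocated the
 * remaining ids of that block are cached on the per-ucontext
 * cqids/qpids free lists, since every id in a block maps to the same
 * db/gts page.  Later allocations are satisfied from those lists
 * before the qid table is consulted again.
 */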
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid)
			goto out;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}

		/*
		 * now put the same ids on the qp list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->qpids);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	CTR2(KTR_IW_CXGBE, "%s: qid 0x%x", __func__, qid);
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}

void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->cqids);
	mutex_unlock(&uctx->lock);
}

u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid)
			goto out;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}

		/*
		 * now put the same ids on the cq list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->cqids);
		for (i = qid; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}

void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
	c4iw_id_table_free(&rscp->qid_table);
	c4iw_id_table_free(&rscp->pdid_table);
}
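/*
 * The pools below hand out ranges of on-adapter memory for PBLs and
 * RQTs.  The usage statistics round every request up to the pool's
 * minimum allocation unit; RQT sizes are passed in entries and
 * converted to bytes with "size << 6" (64 bytes per entry, matching
 * the MIN_RQT_SHIFT comment: 1KB == 16 entries).
 */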
/* PBL Memory Manager. */

#define MIN_PBL_SHIFT 5			/* 32B == min PBL size (4 entries) */

u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
	u32 addr;

	addr = t4_pblpool_alloc(rdev->adap, size);
	mutex_lock(&rdev->stats.lock);
	if (addr) {
		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
			rdev->stats.pbl.max = rdev->stats.pbl.cur;
	} else
		rdev->stats.pbl.fail++;
	mutex_unlock(&rdev->stats.lock);
	return addr;
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	mutex_lock(&rdev->stats.lock);
	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	t4_pblpool_free(rdev->adap, addr, size);
}

/* RQT Memory Manager. */

#define MIN_RQT_SHIFT 10		/* 1KB == min RQT size (16 entries) */

u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr;

	vmem_xalloc(rdev->rqt_arena,
		    roundup((size << 6), (1 << MIN_RQT_SHIFT)),
		    4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
		    M_FIRSTFIT|M_NOWAIT, &addr);
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr,
	    size << 6);
	if (!addr)
		printf("%s: Out of RQT memory\n",
		    device_get_nameunit(rdev->adap->dev));
	mutex_lock(&rdev->stats.lock);
	if (addr) {
		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
			rdev->stats.rqt.max = rdev->stats.rqt.cur;
	} else
		rdev->stats.rqt.fail++;
	mutex_unlock(&rdev->stats.lock);
	return (u32)addr;
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size << 6);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	vmem_xfree(rdev->rqt_arena, addr,
	    roundup((size << 6), (1 << MIN_RQT_SHIFT)));
}

int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
	rdev->rqt_arena = vmem_create("RQT_MEM_POOL",
				      rdev->adap->vres.rq.start,
				      rdev->adap->vres.rq.size,
				      1, 0, M_FIRSTFIT|M_NOWAIT);
	if (!rdev->rqt_arena)
		return -ENOMEM;

	return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
	vmem_destroy(rdev->rqt_arena);
}
#endif