/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */
#include <sys/cdefs.h>
#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/spinlock.h>
#include "iw_cxgbe.h"

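/*
 * Seed the QID table with the adapter's QP id range.  Only ids aligned to
 * the db/gts page (i.e. !(id & qpmask)) are made allocatable here; the ids
 * in between are handed out later from the per-ucontext free lists.
 */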
static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
{
        u32 i;

        if (c4iw_id_table_alloc(&rdev->resource.qid_table,
            rdev->adap->vres.qp.start,
            rdev->adap->vres.qp.size,
            rdev->adap->vres.qp.size, 0)) {
                printf("%s: return ENOMEM\n", __func__);
                return -ENOMEM;
        }

        for (i = rdev->adap->vres.qp.start;
             i < rdev->adap->vres.qp.start + rdev->adap->vres.qp.size; i++)
                if (!(i & rdev->qpmask))
                        c4iw_id_free(&rdev->resource.qid_table, i);
        return 0;
}

/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
        int err = 0;
        err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
            C4IW_ID_TABLE_F_RANDOM);
        if (err)
                goto tpt_err;
        err = c4iw_init_qid_table(rdev);
        if (err)
                goto qid_err;
        err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
            nr_pdid, 1, 0);
        if (err)
                goto pdid_err;
        return 0;
pdid_err:
        c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
        c4iw_id_table_free(&rdev->resource.tpt_table);
tpt_err:
        return -ENOMEM;
}

/*
 * returns 0 if no resource available
 */
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
{
        u32 entry;
        entry = c4iw_id_alloc(id_table);
        if (entry == (u32)(-1)) {
                return 0;
        }
        return entry;
}

void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
        CTR2(KTR_IW_CXGBE, "%s entry 0x%x", __func__, entry);
        c4iw_id_free(id_table, entry);
}

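/*
 * Allocate a CQ id.  Reuse an id from the ucontext free list if one is
 * available; otherwise take a fresh aligned id from the global QID table
 * and spread the remaining ids of that db/gts page across the ucontext's
 * cqid and qpid free lists.
 */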
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;
        u32 qid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->cqids)) {
                entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
                    entry);
                list_del(&entry->entry);
                qid = entry->qid;
                kfree(entry);
        } else {
                qid = c4iw_get_resource(&rdev->resource.qid_table);
                if (!qid)
                        goto out;
                mutex_lock(&rdev->stats.lock);
                rdev->stats.qid.cur += rdev->qpmask + 1;
                mutex_unlock(&rdev->stats.lock);
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->cqids);
                }

                /*
                 * now put the same ids on the qp list since they all
                 * map to the same db/gts page.
                 */
                entry = kmalloc(sizeof *entry, GFP_KERNEL);
                if (!entry)
                        goto out;
                entry->qid = qid;
                list_add_tail(&entry->entry, &uctx->qpids);
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        CTR2(KTR_IW_CXGBE, "%s: qid 0x%x", __func__, qid);
        mutex_lock(&rdev->stats.lock);
        if (rdev->stats.qid.cur > rdev->stats.qid.max)
                rdev->stats.qid.max = rdev->stats.qid.cur;
        mutex_unlock(&rdev->stats.lock);
        return qid;
}

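/*
 * Return a CQ id to the ucontext free list (not the global table), so it
 * stays associated with this context's db/gts page.
 */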
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
    struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return;
        CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
        entry->qid = qid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->cqids);
        mutex_unlock(&uctx->lock);
}

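/*
 * Allocate a QP id.  Same strategy as c4iw_get_cqid(), except the
 * ucontext qpid free list is consulted first.
 */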
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;
        u32 qid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->qpids)) {
                entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
                    entry);
                list_del(&entry->entry);
                qid = entry->qid;
                kfree(entry);
        } else {
                qid = c4iw_get_resource(&rdev->resource.qid_table);
                if (!qid)
                        goto out;
                mutex_lock(&rdev->stats.lock);
                rdev->stats.qid.cur += rdev->qpmask + 1;
                mutex_unlock(&rdev->stats.lock);
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }

                /*
                 * now put the same ids on the cq list since they all
                 * map to the same db/gts page.
                 */
                entry = kmalloc(sizeof *entry, GFP_KERNEL);
                if (!entry)
                        goto out;
                entry->qid = qid;
                list_add_tail(&entry->entry, &uctx->cqids);
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->cqids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
        mutex_lock(&rdev->stats.lock);
        if (rdev->stats.qid.cur > rdev->stats.qid.max)
                rdev->stats.qid.max = rdev->stats.qid.cur;
        mutex_unlock(&rdev->stats.lock);
        return qid;
}

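/*
 * Return a QP id to the ucontext free list for later reuse.
 */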
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
    struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return;
        CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
        entry->qid = qid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->qpids);
        mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
        c4iw_id_table_free(&rscp->tpt_table);
        c4iw_id_table_free(&rscp->qid_table);
        c4iw_id_table_free(&rscp->pdid_table);
}

/* PBL Memory Manager. */

#define MIN_PBL_SHIFT 5                 /* 32B == min PBL size (4 entries) */

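/*
 * Allocate space for a memory region's page list (PBL) from the adapter's
 * PBL region.  The request is rounded up to the 32B minimum PBL size and
 * satisfied from the pbl_arena vmem(9) arena without sleeping.
 */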
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr;

        vmem_xalloc(rdev->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
            4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
            M_FIRSTFIT | M_NOWAIT, &addr);
        CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr, size);
        mutex_lock(&rdev->stats.lock);
        if (addr) {
                rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
                if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
                        rdev->stats.pbl.max = rdev->stats.pbl.cur;
        } else
                rdev->stats.pbl.fail++;
        mutex_unlock(&rdev->stats.lock);
        return (u32)addr;
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size);
        mutex_lock(&rdev->stats.lock);
        rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        vmem_xfree(rdev->pbl_arena, addr, roundup(size, (1 << MIN_PBL_SHIFT)));
}

int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
        rdev->pbl_arena = vmem_create("PBL_MEM_POOL",
            rdev->adap->vres.pbl.start,
            rdev->adap->vres.pbl.size,
            1, 0, M_FIRSTFIT | M_NOWAIT);
        if (!rdev->pbl_arena)
                return -ENOMEM;

        return 0;
}

void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
        vmem_destroy(rdev->pbl_arena);
}

/* RQT Memory Manager. */

#define MIN_RQT_SHIFT 10                /* 1KB == min RQT size (16 entries) */

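/*
 * Allocate RQT memory.  The size argument is in RQT entries of 64 bytes
 * each (hence the << 6); the request is rounded up to the 1KB minimum and
 * satisfied from the rqt_arena vmem(9) arena without sleeping.
 */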
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr;

        vmem_xalloc(rdev->rqt_arena,
            roundup((size << 6), (1 << MIN_RQT_SHIFT)),
            4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
            M_FIRSTFIT | M_NOWAIT, &addr);
        CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr,
            size << 6);
        if (!addr)
                printf("%s: Out of RQT memory\n",
                    device_get_nameunit(rdev->adap->dev));
        mutex_lock(&rdev->stats.lock);
        if (addr) {
                rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
                if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
                        rdev->stats.rqt.max = rdev->stats.rqt.cur;
        } else
                rdev->stats.rqt.fail++;
        mutex_unlock(&rdev->stats.lock);
        return (u32)addr;
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size << 6);
        mutex_lock(&rdev->stats.lock);
        rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        vmem_xfree(rdev->rqt_arena, addr,
            roundup((size << 6), (1 << MIN_RQT_SHIFT)));
}

int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
        rdev->rqt_arena = vmem_create("RQT_MEM_POOL",
            rdev->adap->vres.rq.start,
            rdev->adap->vres.rq.size,
            1, 0, M_FIRSTFIT | M_NOWAIT);
        if (!rdev->rqt_arena)
                return -ENOMEM;

        return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
        vmem_destroy(rdev->rqt_arena);
}
#endif