// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"
#include "rxe_loc.h"

static const struct rxe_type_info {
	const char *name;
	size_t size;
	size_t elem_offset;
	void (*cleanup)(struct rxe_pool_entry *obj);
	enum rxe_pool_flags flags;
	u32 min_index;
	u32 max_index;
	size_t key_offset;
	size_t key_size;
} rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "rxe-uc",
		.size		= sizeof(struct rxe_ucontext),
		.elem_offset	= offsetof(struct rxe_ucontext, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_PD] = {
		.name		= "rxe-pd",
		.size		= sizeof(struct rxe_pd),
		.elem_offset	= offsetof(struct rxe_pd, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_AH] = {
		.name		= "rxe-ah",
		.size		= sizeof(struct rxe_ah),
		.elem_offset	= offsetof(struct rxe_ah, pelem),
		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index	= RXE_MIN_AH_INDEX,
		.max_index	= RXE_MAX_AH_INDEX,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "rxe-srq",
		.size		= sizeof(struct rxe_srq),
		.elem_offset	= offsetof(struct rxe_srq, pelem),
		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name		= "rxe-qp",
		.size		= sizeof(struct rxe_qp),
		.elem_offset	= offsetof(struct rxe_qp, pelem),
		.cleanup	= rxe_qp_cleanup,
		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name		= "rxe-cq",
		.size		= sizeof(struct rxe_cq),
		.elem_offset	= offsetof(struct rxe_cq, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
		.cleanup	= rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name		= "rxe-mr",
		.size		= sizeof(struct rxe_mr),
		.elem_offset	= offsetof(struct rxe_mr, pelem),
		.cleanup	= rxe_mr_cleanup,
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_MR_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name		= "rxe-mw",
		.size		= sizeof(struct rxe_mw),
		.elem_offset	= offsetof(struct rxe_mw, pelem),
		.cleanup	= rxe_mw_cleanup,
		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index	= RXE_MIN_MW_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name		= "rxe-mc_grp",
		.size		= sizeof(struct rxe_mc_grp),
		.elem_offset	= offsetof(struct rxe_mc_grp, pelem),
		.cleanup	= rxe_mc_cleanup,
		.flags		= RXE_POOL_KEY,
		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
		.key_size	= sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name		= "rxe-mc_elem",
		.size		= sizeof(struct rxe_mc_elem),
		.elem_offset	= offsetof(struct rxe_mc_elem, pelem),
	},
};

static inline const char *pool_name(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].name;
}

static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->index.max_index = max;
	pool->index.min_index = min;

	pool->index.table = bitmap_zalloc(max - min + 1, GFP_KERNEL);
	if (!pool->index.table) {
		err = -ENOMEM;
		goto out;
	}

out:
	return err;
}
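
/*
 * rxe_pool_init - initialize a pool of objects of the given type.
 * Sizing and behavior come from rxe_type_info[type]; indexed pools
 * also get a bitmap covering [min_index, max_index], and key'd pools
 * record the key offset/size used by the red-black tree comparisons
 * further down in this file.
 */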
int rxe_pool_init(
	struct rxe_dev *rxe,
	struct rxe_pool *pool,
	enum rxe_elem_type type,
	unsigned int max_elem)
{
	int err = 0;
	size_t size = rxe_type_info[type].size;

	memset(pool, 0, sizeof(*pool));

	pool->rxe = rxe;
	pool->type = type;
	pool->max_elem = max_elem;
	pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
	pool->flags = rxe_type_info[type].flags;
	pool->index.tree = RB_ROOT;
	pool->key.tree = RB_ROOT;
	pool->cleanup = rxe_type_info[type].cleanup;

	atomic_set(&pool->num_elem, 0);

	rwlock_init(&pool->pool_lock);

	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
		err = rxe_pool_init_index(pool,
					  rxe_type_info[type].max_index,
					  rxe_type_info[type].min_index);
		if (err)
			goto out;
	}

	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
		pool->key.key_offset = rxe_type_info[type].key_offset;
		pool->key.key_size = rxe_type_info[type].key_size;
	}

out:
	return err;
}

void rxe_pool_cleanup(struct rxe_pool *pool)
{
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));

	bitmap_free(pool->index.table);
}

static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->index.max_index - pool->index.min_index + 1;

	index = find_next_zero_bit(pool->index.table, range, pool->index.last);
	if (index >= range)
		index = find_first_zero_bit(pool->index.table, range);

	WARN_ON_ONCE(index >= range);
	set_bit(index, pool->index.table);
	pool->index.last = index;
	return index + pool->index.min_index;
}

static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->index.tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, index_node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			return -EINVAL;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->index_node, parent, link);
	rb_insert_color(&new->index_node, &pool->index.tree);

	return 0;
}

static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->key.tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, key_node);

		cmp = memcmp((u8 *)elem + pool->key.key_offset,
			     (u8 *)new + pool->key.key_offset,
			     pool->key.key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			return -EINVAL;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->key_node, parent, link);
	rb_insert_color(&new->key_node, &pool->key.tree);

	return 0;
}

int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
{
	struct rxe_pool *pool = elem->pool;
	int err;

	memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
	err = rxe_insert_key(pool, elem);

	return err;
}

int __rxe_add_key(struct rxe_pool_entry *elem, void *key)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;
	int err;

	write_lock_irqsave(&pool->pool_lock, flags);
	err = __rxe_add_key_locked(elem, key);
	write_unlock_irqrestore(&pool->pool_lock, flags);

	return err;
}
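
/*
 * The *_locked add/drop helpers in this file assume the caller already
 * holds pool->pool_lock for writing; the plain variants take and release
 * it themselves with write_lock_irqsave()/write_unlock_irqrestore().
 */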
void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	rb_erase(&elem->key_node, &pool->key.tree);
}

void __rxe_drop_key(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_drop_key_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

int __rxe_add_index_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	int err;

	elem->index = alloc_index(pool);
	err = rxe_insert_index(pool, elem);

	return err;
}

int __rxe_add_index(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;
	int err;

	write_lock_irqsave(&pool->pool_lock, flags);
	err = __rxe_add_index_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);

	return err;
}

void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	clear_bit(elem->index - pool->index.min_index, pool->index.table);
	rb_erase(&elem->index_node, &pool->index.tree);
}

void __rxe_drop_index(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_drop_index_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

void *rxe_alloc_locked(struct rxe_pool *pool)
{
	const struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rxe_pool_entry *elem;
	u8 *obj;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	obj = kzalloc(info->size, GFP_ATOMIC);
	if (!obj)
		goto out_cnt;

	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return obj;

out_cnt:
	atomic_dec(&pool->num_elem);
	return NULL;
}

void *rxe_alloc(struct rxe_pool *pool)
{
	const struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rxe_pool_entry *elem;
	u8 *obj;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	obj = kzalloc(info->size, GFP_KERNEL);
	if (!obj)
		goto out_cnt;

	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return obj;

out_cnt:
	atomic_dec(&pool->num_elem);
	return NULL;
}

int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
{
	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return 0;

out_cnt:
	atomic_dec(&pool->num_elem);
	return -EINVAL;
}

void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;
	const struct rxe_type_info *info = &rxe_type_info[pool->type];
	u8 *obj;

	if (pool->cleanup)
		pool->cleanup(elem);

	if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
		obj = (u8 *)elem - info->elem_offset;
		kfree(obj);
	}

	atomic_dec(&pool->num_elem);
}
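
/*
 * Index and key lookups walk the pool's red-black tree and, on a hit,
 * take a reference (kref_get()) on behalf of the caller, who must later
 * release it with kref_put(..., rxe_elem_release). The returned pointer
 * is the containing object, not the embedded rxe_pool_entry.
 */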
void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
{
	const struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rb_node *node;
	struct rxe_pool_entry *elem;
	u8 *obj;

	node = pool->index.tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, index_node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = (u8 *)elem - info->elem_offset;
	} else {
		obj = NULL;
	}

	return obj;
}

void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	u8 *obj;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);
	obj = rxe_pool_get_index_locked(pool, index);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	return obj;
}

void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
{
	const struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rb_node *node;
	struct rxe_pool_entry *elem;
	u8 *obj;
	int cmp;

	node = pool->key.tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, key_node);

		cmp = memcmp((u8 *)elem + pool->key.key_offset,
			     key, pool->key.key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = (u8 *)elem - info->elem_offset;
	} else {
		obj = NULL;
	}

	return obj;
}

void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	u8 *obj;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);
	obj = rxe_pool_get_key_locked(pool, key);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	return obj;
}
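
/*
 * Example (illustrative sketch only; error handling abbreviated). A
 * typical lifecycle for an object from an indexed, pool-allocated pool
 * such as RXE_TYPE_MR, assuming the rxe_add_index()/rxe_drop_index()/
 * rxe_drop_ref() wrapper macros from rxe_pool.h and an initialized
 * struct rxe_dev *rxe:
 *
 *	struct rxe_mr *mr;
 *
 *	mr = rxe_alloc(&rxe->mr_pool);	// kzalloc() + kref_init()
 *	if (!mr)
 *		return -ENOMEM;
 *	rxe_add_index(mr);	// assign mr->pelem.index, insert in tree
 *
 *	// elsewhere: look the object up by index; this takes a kref
 *	mr = rxe_pool_get_index(&rxe->mr_pool, index);
 *	if (mr)
 *		rxe_drop_ref(mr);	// put the lookup reference when done
 *
 *	// teardown: remove from the tree, then put the initial reference,
 *	// which ends in rxe_elem_release() -> cleanup() + kfree()
 *	rxe_drop_index(mr);
 *	rxe_drop_ref(mr);
 */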