/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"

static LIST_HEAD(mad_agent_list);
/* Lock to protect mad_agent_list */
static DEFINE_SPINLOCK(mad_agent_list_lock);

static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey = NULL;
	struct pkey_index_qp_list *tmp_pkey;
	struct ib_device *dev = pp->sec->dev;

	spin_lock(&dev->port_data[pp->port_num].pkey_list_lock);
	list_for_each_entry (tmp_pkey, &dev->port_data[pp->port_num].pkey_list,
			     pkey_index_list) {
		if (tmp_pkey->pkey_index == pp->pkey_index) {
			pkey = tmp_pkey;
			break;
		}
	}
	spin_unlock(&dev->port_data[pp->port_num].pkey_list_lock);
	return pkey;
}

static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
				      u16 *pkey,
				      u64 *subnet_prefix)
{
	struct ib_device *dev = pp->sec->dev;
	int ret;

	ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
	if (ret)
		return ret;

	ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

	return ret;
}
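/* Check that the QP's security context, and the security contexts of
 * all QPs sharing it, are allowed to use the given pkey on the given
 * subnet.
 */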
static int enforce_qp_pkey_security(u16 pkey,
				    u64 subnet_prefix,
				    struct ib_qp_security *qp_sec)
{
	struct ib_qp_security *shared_qp_sec;
	int ret;

	ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
	if (ret)
		return ret;

	list_for_each_entry(shared_qp_sec,
			    &qp_sec->shared_qp_list,
			    shared_qp_list) {
		ret = security_ib_pkey_access(shared_qp_sec->security,
					      subnet_prefix,
					      pkey);
		if (ret)
			return ret;
	}
	return 0;
}

/* The caller of this function must hold the QP security
 * mutex of the QP of the security structure in *pps.
 *
 * It takes separate ports_pkeys and security structures
 * because in some cases the pps will be for new settings,
 * or the pps will be for the real QP while the security
 * structure is for a shared QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
				       struct ib_qp_security *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret = 0;

	if (!pps)
		return 0;

	if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->main,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
		if (ret)
			return ret;
	}

	if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->alt,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
	}

	return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
	struct ib_qp_security *shared_qp_sec;
	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_ERR
	};
	struct ib_event event = {
		.event = IB_EVENT_QP_FATAL
	};

	/* If the QP is in the process of being destroyed
	 * the qp pointer in the security structure is
	 * undefined.  It cannot be modified now.
	 */
	if (sec->destroying)
		return;

	ib_modify_qp(sec->qp,
		     &attr,
		     IB_QP_STATE);

	if (sec->qp->event_handler && sec->qp->qp_context) {
		event.element.qp = sec->qp;
		sec->qp->event_handler(&event,
				       sec->qp->qp_context);
	}

	list_for_each_entry(shared_qp_sec,
			    &sec->shared_qp_list,
			    shared_qp_list) {
		struct ib_qp *qp = shared_qp_sec->qp;

		if (qp->event_handler && qp->qp_context) {
			event.element.qp = qp;
			event.device = qp->device;
			qp->event_handler(&event,
					  qp->qp_context);
		}
	}
}
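/* Re-check every QP that uses this pkey index after a cache change.
 * QPs that no longer pass the pkey access check are collected on a
 * local list and then transitioned to the error state.
 */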
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
				  struct ib_device *device,
				  u8 port_num,
				  u64 subnet_prefix)
{
	struct ib_port_pkey *pp, *tmp_pp;
	bool comp;
	LIST_HEAD(to_error_list);
	u16 pkey_val;

	if (!ib_get_cached_pkey(device,
				port_num,
				pkey->pkey_index,
				&pkey_val)) {
		spin_lock(&pkey->qp_list_lock);
		list_for_each_entry(pp, &pkey->qp_list, qp_list) {
			if (atomic_read(&pp->sec->error_list_count))
				continue;

			if (enforce_qp_pkey_security(pkey_val,
						     subnet_prefix,
						     pp->sec)) {
				atomic_inc(&pp->sec->error_list_count);
				list_add(&pp->to_error_list,
					 &to_error_list);
			}
		}
		spin_unlock(&pkey->qp_list_lock);
	}

	list_for_each_entry_safe(pp,
				 tmp_pp,
				 &to_error_list,
				 to_error_list) {
		mutex_lock(&pp->sec->mutex);
		qp_to_error(pp->sec);
		list_del(&pp->to_error_list);
		atomic_dec(&pp->sec->error_list_count);
		comp = pp->sec->destroying;
		mutex_unlock(&pp->sec->mutex);

		if (comp)
			complete(&pp->sec->error_complete);
	}
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *tmp_pkey;
	struct pkey_index_qp_list *pkey;
	struct ib_device *dev;
	u8 port_num = pp->port_num;
	int ret = 0;

	if (pp->state != IB_PORT_PKEY_VALID)
		return 0;

	dev = pp->sec->dev;

	pkey = get_pkey_idx_qp_list(pp);

	if (!pkey) {
		bool found = false;

		pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
		if (!pkey)
			return -ENOMEM;

		spin_lock(&dev->port_data[port_num].pkey_list_lock);
		/* Check for the PKey again.  A racing process may
		 * have created it.
		 */
		list_for_each_entry(tmp_pkey,
				    &dev->port_data[port_num].pkey_list,
				    pkey_index_list) {
			if (tmp_pkey->pkey_index == pp->pkey_index) {
				kfree(pkey);
				pkey = tmp_pkey;
				found = true;
				break;
			}
		}

		if (!found) {
			pkey->pkey_index = pp->pkey_index;
			spin_lock_init(&pkey->qp_list_lock);
			INIT_LIST_HEAD(&pkey->qp_list);
			list_add(&pkey->pkey_index_list,
				 &dev->port_data[port_num].pkey_list);
		}
		spin_unlock(&dev->port_data[port_num].pkey_list_lock);
	}

	spin_lock(&pkey->qp_list_lock);
	list_add(&pp->qp_list, &pkey->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	pp->state = IB_PORT_PKEY_LISTED;

	return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey;

	if (pp->state != IB_PORT_PKEY_LISTED)
		return;

	pkey = get_pkey_idx_qp_list(pp);

	spin_lock(&pkey->qp_list_lock);
	list_del(&pp->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	/* The setting may still be valid, e.g. after
	 * a destroy has failed.
	 */
	pp->state = IB_PORT_PKEY_VALID;
}

static void destroy_qp_security(struct ib_qp_security *sec)
{
	security_ib_free_security(sec->security);
	kfree(sec->ports_pkeys);
	kfree(sec);
}
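/* Build the ports_pkeys settings that will apply to the QP after a
 * modify operation.  Fields not selected in qp_attr_mask are inherited
 * from the QP's current settings when those exist.
 */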
/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
					  const struct ib_qp_attr *qp_attr,
					  int qp_attr_mask)
{
	struct ib_ports_pkeys *new_pps;
	struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

	new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
	if (!new_pps)
		return NULL;

	if (qp_attr_mask & IB_QP_PORT)
		new_pps->main.port_num = qp_attr->port_num;
	else if (qp_pps)
		new_pps->main.port_num = qp_pps->main.port_num;

	if (qp_attr_mask & IB_QP_PKEY_INDEX)
		new_pps->main.pkey_index = qp_attr->pkey_index;
	else if (qp_pps)
		new_pps->main.pkey_index = qp_pps->main.pkey_index;

	if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
		new_pps->main.state = IB_PORT_PKEY_VALID;

	if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
		new_pps->main.port_num = qp_pps->main.port_num;
		new_pps->main.pkey_index = qp_pps->main.pkey_index;
		if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
			new_pps->main.state = IB_PORT_PKEY_VALID;
	}

	if (qp_attr_mask & IB_QP_ALT_PATH) {
		new_pps->alt.port_num = qp_attr->alt_port_num;
		new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
		new_pps->alt.state = IB_PORT_PKEY_VALID;
	} else if (qp_pps) {
		new_pps->alt.port_num = qp_pps->alt.port_num;
		new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
		if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
			new_pps->alt.state = IB_PORT_PKEY_VALID;
	}

	new_pps->main.sec = qp->qp_sec;
	new_pps->alt.sec = qp->qp_sec;
	return new_pps;
}

int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	struct ib_qp *real_qp = qp->real_qp;
	int ret;

	ret = ib_create_qp_security(qp, dev);

	if (ret)
		return ret;

	if (!qp->qp_sec)
		return 0;

	mutex_lock(&real_qp->qp_sec->mutex);
	ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
					  qp->qp_sec);

	if (ret)
		goto ret;

	if (qp != real_qp)
		list_add(&qp->qp_sec->shared_qp_list,
			 &real_qp->qp_sec->shared_qp_list);
ret:
	mutex_unlock(&real_qp->qp_sec->mutex);
	if (ret)
		destroy_qp_security(qp->qp_sec);

	return ret;
}

void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
	struct ib_qp *real_qp = sec->qp->real_qp;

	mutex_lock(&real_qp->qp_sec->mutex);
	list_del(&sec->shared_qp_list);
	mutex_unlock(&real_qp->qp_sec->mutex);

	destroy_qp_security(sec);
}

int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	unsigned int i;
	bool is_ib = false;
	int ret;

	rdma_for_each_port (dev, i) {
		is_ib = rdma_protocol_ib(dev, i);
		if (is_ib)
			break;
	}

	/* If this isn't an IB device don't create the security context */
	if (!is_ib)
		return 0;

	qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
	if (!qp->qp_sec)
		return -ENOMEM;

	qp->qp_sec->qp = qp;
	qp->qp_sec->dev = dev;
	mutex_init(&qp->qp_sec->mutex);
	INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
	atomic_set(&qp->qp_sec->error_list_count, 0);
	init_completion(&qp->qp_sec->error_complete);
	ret = security_ib_alloc_security(&qp->qp_sec->security);
	if (ret) {
		kfree(qp->qp_sec);
		qp->qp_sec = NULL;
	}

	return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);
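/* Begin tearing down a QP's security context: remove it from the
 * per-pkey QP lists and record how many error-flow completions the
 * destroy path must wait for.
 */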
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
	/* Return if not IB */
	if (!sec)
		return;

	mutex_lock(&sec->mutex);

	/* Remove the QP from the lists so it won't get added to
	 * a to_error_list during the destroy process.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_remove(&sec->ports_pkeys->main);
		port_pkey_list_remove(&sec->ports_pkeys->alt);
	}

	/* If the QP is already in one or more of those lists
	 * the destroying flag will ensure the to-error flow
	 * doesn't operate on an undefined QP.
	 */
	sec->destroying = true;

	/* Record the error list count to know how many completions
	 * to wait for.
	 */
	sec->error_comps_pending = atomic_read(&sec->error_list_count);

	mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
	int ret;
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is in progress this
	 * QP security could be marked for an error state
	 * transition.  Wait for this to complete.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	mutex_lock(&sec->mutex);
	sec->destroying = false;

	/* Restore the position in the lists and verify
	 * access is still allowed in case a cache update
	 * occurred while attempting to destroy.
	 *
	 * Because these settings were listed already
	 * and removed during ib_destroy_qp_security_begin
	 * we know the pkey_index_qp_list for the PKey
	 * already exists so port_pkey_list_insert won't fail.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_insert(&sec->ports_pkeys->main);
		port_pkey_list_insert(&sec->ports_pkeys->alt);
	}

	ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
	if (ret)
		qp_to_error(sec);

	mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is occurring we must
	 * wait until this QP security structure is processed
	 * in the QP to-error flow before destroying it because
	 * the to_error_list is in use.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	destroy_qp_security(sec);
}
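/* Called after a port's cached pkey table or subnet prefix has been
 * updated.  Re-checks every QP listed against the port's pkey indexes
 * and moves QPs that are no longer allowed to the error state.
 */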
void ib_security_cache_change(struct ib_device *device,
			      u8 port_num,
			      u64 subnet_prefix)
{
	struct pkey_index_qp_list *pkey;

	list_for_each_entry (pkey, &device->port_data[port_num].pkey_list,
			     pkey_index_list) {
		check_pkey_qps(pkey,
			       device,
			       port_num,
			       subnet_prefix);
	}
}

void ib_security_release_port_pkey_list(struct ib_device *device)
{
	struct pkey_index_qp_list *pkey, *tmp_pkey;
	unsigned int i;

	rdma_for_each_port (device, i) {
		list_for_each_entry_safe(pkey,
					 tmp_pkey,
					 &device->port_data[i].pkey_list,
					 pkey_index_list) {
			list_del(&pkey->pkey_index_list);
			kfree(pkey);
		}
	}
}
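/* Security-aware wrapper around the driver's modify_qp operation.
 * New port/pkey settings are listed and checked before the modify is
 * issued so a concurrent cache update cannot miss this QP.
 */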
int ib_security_modify_qp(struct ib_qp *qp,
			  struct ib_qp_attr *qp_attr,
			  int qp_attr_mask,
			  struct ib_udata *udata)
{
	int ret = 0;
	struct ib_ports_pkeys *tmp_pps;
	struct ib_ports_pkeys *new_pps = NULL;
	struct ib_qp *real_qp = qp->real_qp;
	bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
			   real_qp->qp_type == IB_QPT_GSI ||
			   real_qp->qp_type >= IB_QPT_RESERVED1);
	bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
			   (qp_attr_mask & IB_QP_ALT_PATH));

	WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
		   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
		   !real_qp->qp_sec),
		  "%s: QP security is not initialized for IB QP: %d\n",
		  __func__, real_qp->qp_num);

	/* The port/pkey settings are maintained only for the real QP. Open
	 * handles on the real QP will be in the shared_qp_list. When
	 * enforcing security on the real QP all the shared QPs will be
	 * checked as well.
	 */

	if (pps_change && !special_qp && real_qp->qp_sec) {
		mutex_lock(&real_qp->qp_sec->mutex);
		new_pps = get_new_pps(real_qp,
				      qp_attr,
				      qp_attr_mask);
		if (!new_pps) {
			mutex_unlock(&real_qp->qp_sec->mutex);
			return -ENOMEM;
		}
		/* Add this QP to the lists for the new port
		 * and pkey settings before checking for permission
		 * in case there is a concurrent cache update
		 * occurring.  Walking the list for a cache change
		 * doesn't acquire the security mutex unless it's
		 * sending the QP to error.
		 */
		ret = port_pkey_list_insert(&new_pps->main);

		if (!ret)
			ret = port_pkey_list_insert(&new_pps->alt);

		if (!ret)
			ret = check_qp_port_pkey_settings(new_pps,
							  real_qp->qp_sec);
	}

	if (!ret)
		ret = real_qp->device->ops.modify_qp(real_qp,
						     qp_attr,
						     qp_attr_mask,
						     udata);

	if (new_pps) {
		/* Clean up the lists and free the appropriate
		 * ports_pkeys structure.
		 */
		if (ret) {
			tmp_pps = new_pps;
		} else {
			tmp_pps = real_qp->qp_sec->ports_pkeys;
			real_qp->qp_sec->ports_pkeys = new_pps;
		}

		if (tmp_pps) {
			port_pkey_list_remove(&tmp_pps->main);
			port_pkey_list_remove(&tmp_pps->alt);
		}
		kfree(tmp_pps);
		mutex_unlock(&real_qp->qp_sec->mutex);
	}
	return ret;
}

static int ib_security_pkey_access(struct ib_device *dev,
				   u8 port_num,
				   u16 pkey_index,
				   void *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret;

	if (!rdma_protocol_ib(dev, port_num))
		return 0;

	ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
	if (ret)
		return ret;

	ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);

	if (ret)
		return ret;

	return security_ib_pkey_access(sec, subnet_prefix, pkey);
}

void ib_mad_agent_security_change(void)
{
	struct ib_mad_agent *ag;

	spin_lock(&mad_agent_list_lock);
	list_for_each_entry(ag,
			    &mad_agent_list,
			    mad_agent_sec_list)
		WRITE_ONCE(ag->smp_allowed,
			   !security_ib_endport_manage_subnet(ag->security,
				dev_name(&ag->device->dev), ag->port_num));
	spin_unlock(&mad_agent_list_lock);
}

int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
				enum ib_qp_type qp_type)
{
	int ret;

	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return 0;

	INIT_LIST_HEAD(&agent->mad_agent_sec_list);

	ret = security_ib_alloc_security(&agent->security);
	if (ret)
		return ret;

	if (qp_type != IB_QPT_SMI)
		return 0;

	spin_lock(&mad_agent_list_lock);
	ret = security_ib_endport_manage_subnet(agent->security,
						dev_name(&agent->device->dev),
						agent->port_num);
	if (ret)
		goto free_security;

	WRITE_ONCE(agent->smp_allowed, true);
	list_add(&agent->mad_agent_sec_list, &mad_agent_list);
	spin_unlock(&mad_agent_list_lock);
	return 0;

free_security:
	spin_unlock(&mad_agent_list_lock);
	security_ib_free_security(agent->security);
	return ret;
}

void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return;

	if (agent->qp->qp_type == IB_QPT_SMI) {
		spin_lock(&mad_agent_list_lock);
		list_del(&agent->mad_agent_sec_list);
		spin_unlock(&mad_agent_list_lock);
	}

	security_ib_free_security(agent->security);
}

int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
	if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
		return 0;

	if (map->agent.qp->qp_type == IB_QPT_SMI) {
		if (!READ_ONCE(map->agent.smp_allowed))
			return -EACCES;
		return 0;
	}

	return ib_security_pkey_access(map->agent.device,
				       map->agent.port_num,
				       pkey_index,
				       map->agent.security);
}