/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5

/*
 * min buffers we want to have per context, after those used by the driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.).  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, S_IRUGO);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");

unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this many blocks (in percent of allocated blocks, 0 is off)");
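
/*
 * Illustrative load-time configuration (hypothetical values, shown only
 * as an example of how the parameters above combine):
 *
 *	modprobe hfi1 num_user_contexts=8 krcvqs=2,2,2 rcvhdrcnt=2048
 *
 * With krcvqs=2,2,2, compute_krcvqs() later in this file sums the
 * per-VL entries, so n_krcvqs would be 6.
 */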

static inline u64 encode_rcv_header_entry_size(u16 size);

static struct idr hfi1_unit_table;

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
			     struct hfi1_pportdata *ppd)
{
	struct hfi1_ctxtdata *rcd;
	int ret;

	/* Control context has to be always 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
	if (ret < 0) {
		dd_dev_err(dd, "Kernel receive context allocation failed\n");
		return ret;
	}

	/*
	 * Set up the kernel context flags here and now because they use
	 * default values for all receive side memories.  User contexts will
	 * be handled as they are created.
	 */
	rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);

	/* Control context must use DMA_RTAIL */
	if (rcd->ctxt == HFI1_CTRL_CTXT)
		rcd->flags |= HFI1_CAP_DMA_RTAIL;
	rcd->seq_cnt = 1;

	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
	if (!rcd->sc) {
		dd_dev_err(dd, "Kernel send context allocation failed\n");
		return -ENOMEM;
	}
	hfi1_init_ctxt(rcd->sc);

	return 0;
}

/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
	u16 i;
	int ret;

	dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		return -ENOMEM;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		ret = hfi1_create_kctxt(dd, dd->pport);
		if (ret)
			goto bail;
	}

	return 0;
bail:
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
		hfi1_free_ctxt(dd->rcd[i]);

	/* All the contexts should be freed, free the array */
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}

/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
	kref_init(&rcd->kref);
}

/**
 * hfi1_rcd_free - When reference is zero clean up.
 * @kref: pointer to an initialized rcd data structure
 *
 */
static void hfi1_rcd_free(struct kref *kref)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd =
		container_of(kref, struct hfi1_ctxtdata, kref);

	hfi1_free_ctxtdata(rcd->dd, rcd);

	spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
	rcd->dd->rcd[rcd->ctxt] = NULL;
	spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

	kfree(rcd);
}

/**
 * hfi1_rcd_put - decrement reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
	if (rcd)
		return kref_put(&rcd->kref, hfi1_rcd_free);

	return 0;
}

/**
 * hfi1_rcd_get - increment reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 */
void hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
	kref_get(&rcd->kref);
}

/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, we are EBUSY.
 *
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
			      struct hfi1_ctxtdata *rcd, u16 *index)
{
	unsigned long flags;
	u16 ctxt;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt < dd->num_rcv_contexts) {
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		hfi1_rcd_init(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (ctxt >= dd->num_rcv_contexts)
		return -EBUSY;

	*index = ctxt;

	return 0;
}
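
/*
 * Note on lifetime: allocate_rcd_index() performs the kref_init() for a
 * context, so a successfully indexed rcd already holds its initial
 * reference.  That reference is dropped by the matching hfi1_free_ctxt()
 * (a thin wrapper around hfi1_rcd_put()), and hfi1_rcd_free() then clears
 * the dd->rcd[] slot under uctxt_lock before freeing the structure.
 */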

/**
 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
 * array
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
 * ctxt index is valid.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt)
{
	if (ctxt < dd->num_rcv_contexts)
		return hfi1_rcd_get_by_index(dd, ctxt);

	return NULL;
}

/**
 * hfi1_rcd_get_by_index
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more index, get the protecting spinlock and then increment the
 * kref.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd = NULL;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd[ctxt]) {
		rcd = dd->rcd[ctxt];
		hfi1_rcd_get(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	return rcd;
}
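
/*
 * Typical caller pattern (sketch only; mirrors the _get/_put contract
 * documented above):
 *
 *	rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
 *	if (rcd) {
 *		... use rcd ...
 *		hfi1_rcd_put(rcd);
 *	}
 */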

/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **context)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;
		u16 ctxt;
		int ret;

		ret = allocate_rcd_index(dd, rcd, &ctxt);
		if (ret) {
			*context = NULL;
			kfree(rcd);
			return ret;
		}

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		hfi1_exp_tid_group_init(&rcd->tid_group_list);
		hfi1_exp_tid_group_init(&rcd->tid_used_list);
		hfi1_exp_tid_group_init(&rcd->tid_full_list);
		rcd->ppd = ppd;
		rcd->dd = dd;
		__set_bit(0, rcd->in_use_ctxts);
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;

		mutex_init(&rcd->exp_lock);

		hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous contexts would have
		 * taken, and we have to account for any extra groups assigned
		 * to the static (kernel) or dynamic (vnic/user) contexts.
		 */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;

		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		/*
		 * Simple Eager buffer allocation: we have already
		 * pre-allocated the number of RcvArray entry groups.
		 * Each ctxtdata structure holds the number of groups for
		 * that context.
		 *
		 * To follow CSR requirements and maintain cacheline
		 * alignment, make sure all sizes and bases are multiples
		 * of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers = kzalloc_node(
			rcd->egrbufs.count * sizeof(*rcd->egrbufs.buffers),
			GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids = kzalloc_node(
				rcd->egrbufs.count *
				sizeof(*rcd->egrbufs.rcvtids),
				GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		/* Applicable only for statically created kernel contexts */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;
		}

		*context = rcd;
		return 0;
	}

bail:
	*context = NULL;
	hfi1_free_ctxt(rcd);
	return -ENOMEM;
}

/**
 * hfi1_free_ctxt
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
 * When a context is done being used (kernel or user), this function is
 * called for the "final" put to match the kref init from
 * hfi1_create_ctxtdata().  Other users of the context do a get/put
 * sequence to make sure that the structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
	hfi1_rcd_put(rcd);
}
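
/*
 * Worked example for the eager/expected split above (illustrative numbers
 * only; the real group counts come from dd->rcv_entries): with the default
 * rcvarr_split of 25 and a context owning 2048 RcvArray entries, 25% is
 * 512 entries, which round_down() keeps at a group_size multiple for the
 * eager buffers; the remaining entries are left for expected TIDs.
 */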

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Returns 0 if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	if (size == 32)
		return 4;
	return 0; /* invalid */
}

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits /sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}
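
/*
 * For a sense of scale (illustrative only): since ccti_timer is in units
 * of 1.024 usec, a congestion-setting value of ccti_timer == 977 re-arms
 * the timer roughly every millisecond (977 * 1.024 usec ~= 1000 usec),
 * decrementing ccti once per period until it reaches ccti_min.
 */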

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	ppd->part_enforce |= HFI1_PART_ENFORCE_IN;

	if (loopback) {
		hfi1_early_err(&pdev->dev,
			       "Faking data partition 0x8001 in idx %u\n",
			       !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:
	hfi1_early_err(&pdev->dev,
		       "Congestion Control Agent disabled for port %d\n",
		       port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * Sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed).
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u32 rcvmask;
	u16 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		sc_enable(rcd->sc);
		hfi1_rcd_put(rcd);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
				    "hfi%d_%d",
				    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
				    HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
				    dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
		if (!ppd->link_wq) {
			/*
			 * Make the link workqueue single-threaded to enforce
			 * serialization.
			 */
			ppd->link_wq =
				alloc_workqueue(
				    "hfi_link_%d_%d",
				    WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
				    1, /* max_active */
				    dd->unit, pidx);
			if (!ppd->link_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers,
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned long len;
	u16 i;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up recv low level handlers */
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
		kdeth_process_expected;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
		kdeth_process_eager;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
		process_receive_error;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
		process_receive_bypass;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
		process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
		process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
		process_receive_invalid;
	dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;
	dd->process_vnic_dma_send = hfi1_vnic_send_dma;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
		&dd->pcidev->dev, sizeof(u64),
		&dd->rcvhdrtail_dummy_dma,
		GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
		hfi1_rcd_put(rcd);
	}

	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");

	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	else
		dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
					     sizeof(dd->status->freezemsg));
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		set_intr_state(dd, 1);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by hfi1_init(dd, 1).
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ctxtdata *rcd;
	unsigned pidx;
	int i;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask interrupts, but not errors */
	set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++) {
			rcd = hfi1_rcd_get_by_index(dd, i);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
			hfi1_rcd_put(rcd);
		}
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u32 e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_dma);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);
	rcd->egrbufs.rcvtids = NULL;

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].dma)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].dma);
	}
	kfree(rcd->egrbufs.buffers);
	rcd->egrbufs.alloced = 0;
	rcd->egrbufs.buffers = NULL;

	sc_free(rcd->sc);
	rcd->sc = NULL;

	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);

	rcd->subctxt_uregbase = NULL;
	rcd->subctxt_rcvegrbuf = NULL;
	rcd->subctxt_rcvhdr_base = NULL;
	rcd->opstats = NULL;
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_devs_lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	int other;

	if (!dd->asic_data)
		return NULL;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	ad = dd->asic_data;
	dd->asic_data = NULL;
	/* return NULL if the other dd still has a link */
	return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
			       struct hfi1_asic_data *ad)
{
	clean_up_i2c(dd, ad);
	kfree(ad);
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
	struct hfi1_devdata *dd =
		container_of(kobj, struct hfi1_devdata, kobj);
	struct hfi1_asic_data *ad;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	idr_remove(&hfi1_unit_table, dd->unit);
	list_del(&dd->list);
	ad = release_asic_data(dd);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	if (ad)
		finalize_asic_data(dd, ad);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	free_percpu(dd->send_schedule);
	free_percpu(dd->tx_opstats);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static struct kobj_type hfi1_devdata_type = {
	.release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	kobject_put(&dd->kobj);
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct hfi1_devdata *dd;
	int ret, nports;

	/* extra is * number of ports */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);

	INIT_LIST_HEAD(&dd->list);
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&hfi1_devs_lock, flags);

	ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &hfi1_dev_list);
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		hfi1_early_err(&pdev->dev,
			       "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);

	/*
	 * Initialize all locks for the device.  This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	mutex_init(&dd->dc8051_lock);
	init_waitqueue_head(&dd->event_queue);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->send_schedule = alloc_percpu(u64);
	if (!dd->send_schedule) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
	if (!dd->tx_opstats) {
		ret = -ENOMEM;
		goto bail;
	}

	kobject_init(&dd->kobj, &hfi1_devdata_type);
	return dd;

bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	ret = node_affinity_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * Sanitize the receive interrupt count now; the timeout must wait
	 * until after the hardware type is known.
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&hfi1_unit_table);

	hfi1_dbg_init();
	ret = hfi1_wss_init();
	if (ret < 0)
		goto bail_wss;
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_wss_exit();
bail_wss:
	hfi1_dbg_exit();
	idr_destroy(&hfi1_unit_table);
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	node_affinity_destroy();
	hfi1_wss_exit();
	hfi1_dbg_exit();

	idr_destroy(&hfi1_unit_table);
	dispose_firmware(); /* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state_protected(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			kfree_rcu(cc_state, rcu);
	}

	free_credit_return(dd);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we iterate over num_rcv_contexts, because that's what
	 * we allocated.
	 */
	for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];

		if (rcd) {
			hfi1_clear_tids(rcd);
			hfi1_free_ctxt(rcd);
		}
	}

	kfree(dd->rcd);
	dd->rcd = NULL;

	free_pio_map(dd);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}

static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
{
	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(dev, "Receive header queue count too small\n");
		return -EINVAL;
	}

	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(dev,
			       "Receive header queue count cannot be greater than %u\n",
			       HFI1_MAX_HDRQ_EGRBUF_CNT);
		return -EINVAL;
	}

	if (thecnt % HDRQ_INCREMENT) {
		hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
			       thecnt, HDRQ_INCREMENT);
		return -EINVAL;
	}

	return 0;
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate dev ids */
	if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
	      ent->device == PCI_DEVICE_ID_INTEL1)) {
		hfi1_early_err(&pdev->dev,
			       "Failing on unknown Intel deviceid 0x%x\n",
			       ent->device);
		ret = -ENODEV;
		goto bail;
	}

	/* Validate some global module parameters */
	ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
	if (ret)
		goto bail;

	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
			       hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
				eager_buffer_size);
	} else {
		hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}
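
	/*
	 * Worked example of the adjustment above (illustrative values only):
	 * eager_buffer_size=5242880 (5MB) is not a power of 2, so it is
	 * rounded up to 8MB and then clamped to the hardware window
	 * [MIN_EAGER_BUFFER * 8, MAX_EAGER_BUFFER_TOTAL].
	 */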

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	dd = hfi1_init_dd(pdev, ent);

	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		goto clean_bail; /* error already printed */
	}

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	/* setup vnic */
	hfi1_vnic_setup(dd);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.  If there was an earlier
	 * failure, we still create devices, so diags, etc. can be used
	 * to determine the cause of the problem.
	 */
	if (!initfail && !ret) {
		dd->flags |= HFI1_INITTED;
		/* create debugfs files after init and ib register */
		hfi1_dbg_ibdev_init(&dd->verbs_dev);
	}

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			hfi1_quiet_serdes(dd->pport + pidx);
			ppd = dd->pport + pidx;
			if (ppd->hfi1_wq) {
				destroy_workqueue(ppd->hfi1_wq);
				ppd->hfi1_wq = NULL;
			}
			if (ppd->link_wq) {
				destroy_workqueue(ppd->link_wq);
				ppd->link_wq = NULL;
			}
		}
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		hfi1_vnic_cleanup(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail; /* everything already cleaned */
	}

	sdma_start(dd);

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}

static void wait_for_clients(struct hfi1_devdata *dd)
{
	/*
	 * Remove the device init value and complete the device if there
	 * are no clients, or wait for active clients to finish.
	 */
	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	wait_for_completion(&dd->user_comp);
}

static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* close debugfs files before ib unregister */
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);

	/* remove the /dev hfi1 interface */
	hfi1_device_remove(dd);

	/* wait for existing user space clients to finish */
	wait_for_clients(dd);

	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/* cleanup vnic */
	hfi1_vnic_cleanup(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	postinit_cleanup(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;
	u64 reg;

	if (!rcd->rcvhdrq) {
		dma_addr_t dma_hdrqtail;
		gfp_t gfp_flags;

		/*
		 * rcvhdrqentsize is in DWs, so we have to convert to bytes
		 * (* sizeof(u32)).
		 */
		amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
				 sizeof(u32));

		if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			gfp_flags = GFP_KERNEL;
		else
			gfp_flags = GFP_USER;
		rcd->rcvhdrq = dma_zalloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
			gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
				gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}
	/*
	 * These values are per-context:
	 *	RcvHdrCnt
	 *	RcvHdrEntSize
	 *	RcvHdrSize
	 */
	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
		& RCV_HDR_CNT_CNT_MASK)
		<< RCV_HDR_CNT_CNT_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
		& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
	reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);

	/*
	 * Program dummy tail address for every receive context
	 * before enabling any receive context
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
			dd->rcvhdrtail_dummy_dma);

	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_dma);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}
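
/*
 * Sizing example for the rcvhdrq allocation above (module defaults, for
 * illustration): rcvhdrq_cnt=2048 entries * rcvhdrqentsize=32 DWs *
 * 4 bytes/DW = 256KB, which is already page aligned, so PAGE_ALIGN()
 * leaves it unchanged.
 */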

/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user
 * contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
	gfp_t gfp_flags;
	u16 order;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value.  Here, we check against the
	 * MTU.
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffer sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));

	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_zalloc_coherent(&dd->pcidev->dev,
					    rcd->egrbufs.rcvtid_size,
					    &rcd->egrbufs.buffers[idx].dma,
					    gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
				rcd->egrbufs.buffers[idx].dma;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;

			/*
			 * Fail the eager buffer allocation if:
			 *   - we are already using the lowest acceptable size
			 *   - we are using one-pkt-per-egr-buffer (this
			 *     implies that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				ret = -ENOMEM;
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;

			/*
			 * If the first attempt to allocate memory failed,
			 * don't fail everything but continue with the next
			 * lower size.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}

			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].dma =
					rcd->egrbufs.buffers[j].dma + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].dma + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].dma +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;

	hfi1_cdbg(PROC,
		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
		  rcd->ctxt, rcd->egrbufs.alloced,
		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

	/*
	 * Set the context's rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
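
	/*
	 * Example of the threshold rule above (numbers for illustration):
	 * with 360 allocated entries, half is 180, so the threshold becomes
	 * rounddown_pow_of_two(180) == 128.
	 */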

	/*
	 * Compute the expected RcvArray entry base.  This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail_rcvegrbuf_phys;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].dma, order);
		cond_resched();
	}

	return 0;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].dma);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].dma = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}

	return ret;
}