/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5
/*
 * min buffers we want to have per context, after driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.) Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, 0444);
MODULE_PARM_DESC(
        num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");

unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");

static inline u64 encode_rcv_header_entry_size(u16 size);

static struct idr hfi1_unit_table;

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
                             struct hfi1_pportdata *ppd)
{
        struct hfi1_ctxtdata *rcd;
        int ret;

        /* Control context has to be always 0 */
        BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

        ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
        if (ret < 0) {
                dd_dev_err(dd, "Kernel receive context allocation failed\n");
                return ret;
        }

        /*
         * Set up the kernel context flags here and now because they use
         * default values for all receive side memories. User contexts will
         * be handled as they are created.
146 */ 147 rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) | 148 HFI1_CAP_KGET(NODROP_RHQ_FULL) | 149 HFI1_CAP_KGET(NODROP_EGR_FULL) | 150 HFI1_CAP_KGET(DMA_RTAIL); 151 152 /* Control context must use DMA_RTAIL */ 153 if (rcd->ctxt == HFI1_CTRL_CTXT) 154 rcd->flags |= HFI1_CAP_DMA_RTAIL; 155 rcd->seq_cnt = 1; 156 157 rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node); 158 if (!rcd->sc) { 159 dd_dev_err(dd, "Kernel send context allocation failed\n"); 160 return -ENOMEM; 161 } 162 hfi1_init_ctxt(rcd->sc); 163 164 return 0; 165 } 166 167 /* 168 * Create the receive context array and one or more kernel contexts 169 */ 170 int hfi1_create_kctxts(struct hfi1_devdata *dd) 171 { 172 u16 i; 173 int ret; 174 175 dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd), 176 GFP_KERNEL, dd->node); 177 if (!dd->rcd) 178 return -ENOMEM; 179 180 for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) { 181 ret = hfi1_create_kctxt(dd, dd->pport); 182 if (ret) 183 goto bail; 184 } 185 186 return 0; 187 bail: 188 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) 189 hfi1_free_ctxt(dd->rcd[i]); 190 191 /* All the contexts should be freed, free the array */ 192 kfree(dd->rcd); 193 dd->rcd = NULL; 194 return ret; 195 } 196 197 /* 198 * Helper routines for the receive context reference count (rcd and uctxt). 199 */ 200 static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd) 201 { 202 kref_init(&rcd->kref); 203 } 204 205 /** 206 * hfi1_rcd_free - When reference is zero clean up. 207 * @kref: pointer to an initialized rcd data structure 208 * 209 */ 210 static void hfi1_rcd_free(struct kref *kref) 211 { 212 unsigned long flags; 213 struct hfi1_ctxtdata *rcd = 214 container_of(kref, struct hfi1_ctxtdata, kref); 215 216 hfi1_free_ctxtdata(rcd->dd, rcd); 217 218 spin_lock_irqsave(&rcd->dd->uctxt_lock, flags); 219 rcd->dd->rcd[rcd->ctxt] = NULL; 220 spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags); 221 222 kfree(rcd); 223 } 224 225 /** 226 * hfi1_rcd_put - decrement reference for rcd 227 * @rcd: pointer to an initialized rcd data structure 228 * 229 * Use this to put a reference after the init. 230 */ 231 int hfi1_rcd_put(struct hfi1_ctxtdata *rcd) 232 { 233 if (rcd) 234 return kref_put(&rcd->kref, hfi1_rcd_free); 235 236 return 0; 237 } 238 239 /** 240 * hfi1_rcd_get - increment reference for rcd 241 * @rcd: pointer to an initialized rcd data structure 242 * 243 * Use this to get a reference after the init. 244 */ 245 void hfi1_rcd_get(struct hfi1_ctxtdata *rcd) 246 { 247 kref_get(&rcd->kref); 248 } 249 250 /** 251 * allocate_rcd_index - allocate an rcd index from the rcd array 252 * @dd: pointer to a valid devdata structure 253 * @rcd: rcd data structure to assign 254 * @index: pointer to index that is allocated 255 * 256 * Find an empty index in the rcd array, and assign the given rcd to it. 257 * If the array is full, we are EBUSY. 
 *
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
                              struct hfi1_ctxtdata *rcd, u16 *index)
{
        unsigned long flags;
        u16 ctxt;

        spin_lock_irqsave(&dd->uctxt_lock, flags);
        for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
                if (!dd->rcd[ctxt])
                        break;

        if (ctxt < dd->num_rcv_contexts) {
                rcd->ctxt = ctxt;
                dd->rcd[ctxt] = rcd;
                hfi1_rcd_init(rcd);
        }
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        if (ctxt >= dd->num_rcv_contexts)
                return -EBUSY;

        *index = ctxt;

        return 0;
}

/**
 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
 * array
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
 * ctxt index is valid.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
                                                 u16 ctxt)
{
        if (ctxt < dd->num_rcv_contexts)
                return hfi1_rcd_get_by_index(dd, ctxt);

        return NULL;
}

/**
 * hfi1_rcd_get_by_index
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array. If access is needed to
 * one or more index, get the protecting spinlock and then increment the
 * kref.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
        unsigned long flags;
        struct hfi1_ctxtdata *rcd = NULL;

        spin_lock_irqsave(&dd->uctxt_lock, flags);
        if (dd->rcd[ctxt]) {
                rcd = dd->rcd[ctxt];
                hfi1_rcd_get(rcd);
        }
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        return rcd;
}

/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
                         struct hfi1_ctxtdata **context)
{
        struct hfi1_devdata *dd = ppd->dd;
        struct hfi1_ctxtdata *rcd;
        unsigned kctxt_ngroups = 0;
        u32 base;

        if (dd->rcv_entries.nctxt_extra >
            dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
                kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
                                 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
        rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
        if (rcd) {
                u32 rcvtids, max_entries;
                u16 ctxt;
                int ret;

                ret = allocate_rcd_index(dd, rcd, &ctxt);
                if (ret) {
                        *context = NULL;
                        kfree(rcd);
                        return ret;
                }

                INIT_LIST_HEAD(&rcd->qp_wait_list);
                hfi1_exp_tid_group_init(rcd);
                rcd->ppd = ppd;
                rcd->dd = dd;
                rcd->numa_id = numa;
                rcd->rcv_array_groups = dd->rcv_entries.ngroups;
                rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;

                mutex_init(&rcd->exp_mutex);

                hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

                /*
                 * Calculate the context's RcvArray entry starting point.
                 * We do this here because we have to take into account all
                 * the RcvArray entries that previous context would have
                 * taken and we have to account for any extra groups assigned
                 * to the static (kernel) or dynamic (vnic/user) contexts.
381 */ 382 if (ctxt < dd->first_dyn_alloc_ctxt) { 383 if (ctxt < kctxt_ngroups) { 384 base = ctxt * (dd->rcv_entries.ngroups + 1); 385 rcd->rcv_array_groups++; 386 } else { 387 base = kctxt_ngroups + 388 (ctxt * dd->rcv_entries.ngroups); 389 } 390 } else { 391 u16 ct = ctxt - dd->first_dyn_alloc_ctxt; 392 393 base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) + 394 kctxt_ngroups); 395 if (ct < dd->rcv_entries.nctxt_extra) { 396 base += ct * (dd->rcv_entries.ngroups + 1); 397 rcd->rcv_array_groups++; 398 } else { 399 base += dd->rcv_entries.nctxt_extra + 400 (ct * dd->rcv_entries.ngroups); 401 } 402 } 403 rcd->eager_base = base * dd->rcv_entries.group_size; 404 405 rcd->rcvhdrq_cnt = rcvhdrcnt; 406 rcd->rcvhdrqentsize = hfi1_hdrq_entsize; 407 rcd->rhf_offset = 408 rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32); 409 /* 410 * Simple Eager buffer allocation: we have already pre-allocated 411 * the number of RcvArray entry groups. Each ctxtdata structure 412 * holds the number of groups for that context. 413 * 414 * To follow CSR requirements and maintain cacheline alignment, 415 * make sure all sizes and bases are multiples of group_size. 416 * 417 * The expected entry count is what is left after assigning 418 * eager. 419 */ 420 max_entries = rcd->rcv_array_groups * 421 dd->rcv_entries.group_size; 422 rcvtids = ((max_entries * hfi1_rcvarr_split) / 100); 423 rcd->egrbufs.count = round_down(rcvtids, 424 dd->rcv_entries.group_size); 425 if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) { 426 dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n", 427 rcd->ctxt); 428 rcd->egrbufs.count = MAX_EAGER_ENTRIES; 429 } 430 hfi1_cdbg(PROC, 431 "ctxt%u: max Eager buffer RcvArray entries: %u\n", 432 rcd->ctxt, rcd->egrbufs.count); 433 434 /* 435 * Allocate array that will hold the eager buffer accounting 436 * data. 437 * This will allocate the maximum possible buffer count based 438 * on the value of the RcvArray split parameter. 439 * The resulting value will be rounded down to the closest 440 * multiple of dd->rcv_entries.group_size. 441 */ 442 rcd->egrbufs.buffers = 443 kcalloc_node(rcd->egrbufs.count, 444 sizeof(*rcd->egrbufs.buffers), 445 GFP_KERNEL, numa); 446 if (!rcd->egrbufs.buffers) 447 goto bail; 448 rcd->egrbufs.rcvtids = 449 kcalloc_node(rcd->egrbufs.count, 450 sizeof(*rcd->egrbufs.rcvtids), 451 GFP_KERNEL, numa); 452 if (!rcd->egrbufs.rcvtids) 453 goto bail; 454 rcd->egrbufs.size = eager_buffer_size; 455 /* 456 * The size of the buffers programmed into the RcvArray 457 * entries needs to be big enough to handle the highest 458 * MTU supported. 459 */ 460 if (rcd->egrbufs.size < hfi1_max_mtu) { 461 rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu); 462 hfi1_cdbg(PROC, 463 "ctxt%u: eager bufs size too small. Adjusting to %zu\n", 464 rcd->ctxt, rcd->egrbufs.size); 465 } 466 rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE; 467 468 /* Applicable only for statically created kernel contexts */ 469 if (ctxt < dd->first_dyn_alloc_ctxt) { 470 rcd->opstats = kzalloc_node(sizeof(*rcd->opstats), 471 GFP_KERNEL, numa); 472 if (!rcd->opstats) 473 goto bail; 474 } 475 476 *context = rcd; 477 return 0; 478 } 479 480 bail: 481 *context = NULL; 482 hfi1_free_ctxt(rcd); 483 return -ENOMEM; 484 } 485 486 /** 487 * hfi1_free_ctxt 488 * @rcd: pointer to an initialized rcd data structure 489 * 490 * This wrapper is the free function that matches hfi1_create_ctxtdata(). 
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
        hfi1_rcd_put(rcd);
}

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return a zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
        /* there are only 3 valid receive header entry sizes */
        if (size == 2)
                return 1;
        if (size == 16)
                return 2;
        if (size == 32)
                return 4;
        return 0; /* invalid */
}

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
        struct hfi1_devdata *dd = ppd->dd;
        struct cc_state *cc_state;
        int i;
        u16 cce, ccti_limit, max_ccti = 0;
        u16 shift, mult;
        u64 src;
        u32 current_egress_rate; /* Mbits /sec */
        u32 max_pkt_time;
        /*
         * max_pkt_time is the maximum packet egress time in units
         * of the fabric clock period 1/(805 MHz).
         */

        cc_state = get_cc_state(ppd);

        if (!cc_state)
                /*
                 * This should _never_ happen - rcu_read_lock() is held,
                 * and set_link_ipg() should not be called if cc_state
                 * is NULL.
                 */
                return;

        for (i = 0; i < OPA_MAX_SLS; i++) {
                u16 ccti = ppd->cca_timer[i].ccti;

                if (ccti > max_ccti)
                        max_ccti = ccti;
        }

        ccti_limit = cc_state->cct.ccti_limit;
        if (max_ccti > ccti_limit)
                max_ccti = ccti_limit;

        cce = cc_state->cct.entries[max_ccti].entry;
        shift = (cce & 0xc000) >> 14;
        mult = (cce & 0x3fff);

        current_egress_rate = active_egress_rate(ppd);

        max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

        src = (max_pkt_time >> shift) * mult;

        src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
        src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

        write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
        struct cca_timer *cca_timer;
        struct hfi1_pportdata *ppd;
        int sl;
        u16 ccti_timer, ccti_min;
        struct cc_state *cc_state;
        unsigned long flags;
        enum hrtimer_restart ret = HRTIMER_NORESTART;

        cca_timer = container_of(t, struct cca_timer, hrtimer);
        ppd = cca_timer->ppd;
        sl = cca_timer->sl;

        rcu_read_lock();

        cc_state = get_cc_state(ppd);

        if (!cc_state) {
                rcu_read_unlock();
                return HRTIMER_NORESTART;
        }

        /*
         * 1) decrement ccti for SL
         * 2) calculate IPG for link (set_link_ipg())
         * 3) restart timer, unless ccti is at min value
         */

        ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
        ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

        spin_lock_irqsave(&ppd->cca_timer_lock, flags);

        if (cca_timer->ccti > ccti_min) {
                cca_timer->ccti--;
                set_link_ipg(ppd);
        }

        if (cca_timer->ccti > ccti_min) {
                unsigned long nsec = 1024 * ccti_timer;
                /* ccti_timer is in units of 1.024 usec */
                hrtimer_forward_now(t, ns_to_ktime(nsec));
ret = HRTIMER_RESTART; 621 } 622 623 spin_unlock_irqrestore(&ppd->cca_timer_lock, flags); 624 rcu_read_unlock(); 625 return ret; 626 } 627 628 /* 629 * Common code for initializing the physical port structure. 630 */ 631 void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, 632 struct hfi1_devdata *dd, u8 hw_pidx, u8 port) 633 { 634 int i; 635 uint default_pkey_idx; 636 struct cc_state *cc_state; 637 638 ppd->dd = dd; 639 ppd->hw_pidx = hw_pidx; 640 ppd->port = port; /* IB port number, not index */ 641 ppd->prev_link_width = LINK_WIDTH_DEFAULT; 642 /* 643 * There are C_VL_COUNT number of PortVLXmitWait counters. 644 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter. 645 */ 646 for (i = 0; i < C_VL_COUNT + 1; i++) { 647 ppd->port_vl_xmit_wait_last[i] = 0; 648 ppd->vl_xmit_flit_cnt[i] = 0; 649 } 650 651 default_pkey_idx = 1; 652 653 ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY; 654 ppd->part_enforce |= HFI1_PART_ENFORCE_IN; 655 656 if (loopback) { 657 hfi1_early_err(&pdev->dev, 658 "Faking data partition 0x8001 in idx %u\n", 659 !default_pkey_idx); 660 ppd->pkeys[!default_pkey_idx] = 0x8001; 661 } 662 663 INIT_WORK(&ppd->link_vc_work, handle_verify_cap); 664 INIT_WORK(&ppd->link_up_work, handle_link_up); 665 INIT_WORK(&ppd->link_down_work, handle_link_down); 666 INIT_WORK(&ppd->freeze_work, handle_freeze); 667 INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade); 668 INIT_WORK(&ppd->sma_message_work, handle_sma_message); 669 INIT_WORK(&ppd->link_bounce_work, handle_link_bounce); 670 INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link); 671 INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work); 672 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); 673 674 mutex_init(&ppd->hls_lock); 675 spin_lock_init(&ppd->qsfp_info.qsfp_lock); 676 677 ppd->qsfp_info.ppd = ppd; 678 ppd->sm_trap_qp = 0x0; 679 ppd->sa_qp = 0x1; 680 681 ppd->hfi1_wq = NULL; 682 683 spin_lock_init(&ppd->cca_timer_lock); 684 685 for (i = 0; i < OPA_MAX_SLS; i++) { 686 hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC, 687 HRTIMER_MODE_REL); 688 ppd->cca_timer[i].ppd = ppd; 689 ppd->cca_timer[i].sl = i; 690 ppd->cca_timer[i].ccti = 0; 691 ppd->cca_timer[i].hrtimer.function = cca_timer_fn; 692 } 693 694 ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT; 695 696 spin_lock_init(&ppd->cc_state_lock); 697 spin_lock_init(&ppd->cc_log_lock); 698 cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL); 699 RCU_INIT_POINTER(ppd->cc_state, cc_state); 700 if (!cc_state) 701 goto bail; 702 return; 703 704 bail: 705 706 hfi1_early_err(&pdev->dev, 707 "Congestion Control Agent disabled for port %d\n", port); 708 } 709 710 /* 711 * Do initialization for device that is only needed on 712 * first detect, not on resets. 713 */ 714 static int loadtime_init(struct hfi1_devdata *dd) 715 { 716 return 0; 717 } 718 719 /** 720 * init_after_reset - re-initialize after a reset 721 * @dd: the hfi1_ib device 722 * 723 * sanity check at least some of the values after reset, and 724 * ensure no receive or transmit (explicitly, in case reset 725 * failed 726 */ 727 static int init_after_reset(struct hfi1_devdata *dd) 728 { 729 int i; 730 struct hfi1_ctxtdata *rcd; 731 /* 732 * Ensure chip does no sends or receives, tail updates, or 733 * pioavail updates while we re-initialize. This is mostly 734 * for the driver data structures, not chip registers. 
735 */ 736 for (i = 0; i < dd->num_rcv_contexts; i++) { 737 rcd = hfi1_rcd_get_by_index(dd, i); 738 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS | 739 HFI1_RCVCTRL_INTRAVAIL_DIS | 740 HFI1_RCVCTRL_TAILUPD_DIS, rcd); 741 hfi1_rcd_put(rcd); 742 } 743 pio_send_control(dd, PSC_GLOBAL_DISABLE); 744 for (i = 0; i < dd->num_send_contexts; i++) 745 sc_disable(dd->send_contexts[i].sc); 746 747 return 0; 748 } 749 750 static void enable_chip(struct hfi1_devdata *dd) 751 { 752 struct hfi1_ctxtdata *rcd; 753 u32 rcvmask; 754 u16 i; 755 756 /* enable PIO send */ 757 pio_send_control(dd, PSC_GLOBAL_ENABLE); 758 759 /* 760 * Enable kernel ctxts' receive and receive interrupt. 761 * Other ctxts done as user opens and initializes them. 762 */ 763 for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) { 764 rcd = hfi1_rcd_get_by_index(dd, i); 765 if (!rcd) 766 continue; 767 rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB; 768 rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ? 769 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; 770 if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) 771 rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB; 772 if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL)) 773 rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB; 774 if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL)) 775 rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB; 776 hfi1_rcvctrl(dd, rcvmask, rcd); 777 sc_enable(rcd->sc); 778 hfi1_rcd_put(rcd); 779 } 780 } 781 782 /** 783 * create_workqueues - create per port workqueues 784 * @dd: the hfi1_ib device 785 */ 786 static int create_workqueues(struct hfi1_devdata *dd) 787 { 788 int pidx; 789 struct hfi1_pportdata *ppd; 790 791 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 792 ppd = dd->pport + pidx; 793 if (!ppd->hfi1_wq) { 794 ppd->hfi1_wq = 795 alloc_workqueue( 796 "hfi%d_%d", 797 WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE, 798 HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES, 799 dd->unit, pidx); 800 if (!ppd->hfi1_wq) 801 goto wq_error; 802 } 803 if (!ppd->link_wq) { 804 /* 805 * Make the link workqueue single-threaded to enforce 806 * serialization. 807 */ 808 ppd->link_wq = 809 alloc_workqueue( 810 "hfi_link_%d_%d", 811 WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 812 1, /* max_active */ 813 dd->unit, pidx); 814 if (!ppd->link_wq) 815 goto wq_error; 816 } 817 } 818 return 0; 819 wq_error: 820 pr_err("alloc_workqueue failed for port %d\n", pidx + 1); 821 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 822 ppd = dd->pport + pidx; 823 if (ppd->hfi1_wq) { 824 destroy_workqueue(ppd->hfi1_wq); 825 ppd->hfi1_wq = NULL; 826 } 827 if (ppd->link_wq) { 828 destroy_workqueue(ppd->link_wq); 829 ppd->link_wq = NULL; 830 } 831 } 832 return -ENOMEM; 833 } 834 835 /** 836 * hfi1_init - do the actual initialization sequence on the chip 837 * @dd: the hfi1_ib device 838 * @reinit: re-initializing, so don't allocate new memory 839 * 840 * Do the actual initialization sequence on the chip. This is done 841 * both from the init routine called from the PCI infrastructure, and 842 * when we reset the chip, or detect that it was reset internally, 843 * or it's administratively re-enabled. 844 * 845 * Memory allocation here and in called routines is only done in 846 * the first case (reinit == 0). We have to be careful, because even 847 * without memory allocation, we need to re-write all the chip registers 848 * TIDs, etc. after the reset or enable has completed. 
849 */ 850 int hfi1_init(struct hfi1_devdata *dd, int reinit) 851 { 852 int ret = 0, pidx, lastfail = 0; 853 unsigned long len; 854 u16 i; 855 struct hfi1_ctxtdata *rcd; 856 struct hfi1_pportdata *ppd; 857 858 /* Set up send low level handlers */ 859 dd->process_pio_send = hfi1_verbs_send_pio; 860 dd->process_dma_send = hfi1_verbs_send_dma; 861 dd->pio_inline_send = pio_copy; 862 dd->process_vnic_dma_send = hfi1_vnic_send_dma; 863 864 if (is_ax(dd)) { 865 atomic_set(&dd->drop_packet, DROP_PACKET_ON); 866 dd->do_drop = 1; 867 } else { 868 atomic_set(&dd->drop_packet, DROP_PACKET_OFF); 869 dd->do_drop = 0; 870 } 871 872 /* make sure the link is not "up" */ 873 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 874 ppd = dd->pport + pidx; 875 ppd->linkup = 0; 876 } 877 878 if (reinit) 879 ret = init_after_reset(dd); 880 else 881 ret = loadtime_init(dd); 882 if (ret) 883 goto done; 884 885 /* allocate dummy tail memory for all receive contexts */ 886 dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent( 887 &dd->pcidev->dev, sizeof(u64), 888 &dd->rcvhdrtail_dummy_dma, 889 GFP_KERNEL); 890 891 if (!dd->rcvhdrtail_dummy_kvaddr) { 892 dd_dev_err(dd, "cannot allocate dummy tail memory\n"); 893 ret = -ENOMEM; 894 goto done; 895 } 896 897 /* dd->rcd can be NULL if early initialization failed */ 898 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) { 899 /* 900 * Set up the (kernel) rcvhdr queue and egr TIDs. If doing 901 * re-init, the simplest way to handle this is to free 902 * existing, and re-allocate. 903 * Need to re-create rest of ctxt 0 ctxtdata as well. 904 */ 905 rcd = hfi1_rcd_get_by_index(dd, i); 906 if (!rcd) 907 continue; 908 909 rcd->do_interrupt = &handle_receive_interrupt; 910 911 lastfail = hfi1_create_rcvhdrq(dd, rcd); 912 if (!lastfail) 913 lastfail = hfi1_setup_eagerbufs(rcd); 914 if (lastfail) { 915 dd_dev_err(dd, 916 "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n"); 917 ret = lastfail; 918 } 919 hfi1_rcd_put(rcd); 920 } 921 922 /* Allocate enough memory for user event notification. */ 923 len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS * 924 sizeof(*dd->events)); 925 dd->events = vmalloc_user(len); 926 if (!dd->events) 927 dd_dev_err(dd, "Failed to allocate user events page\n"); 928 /* 929 * Allocate a page for device and port status. 930 * Page will be shared amongst all user processes. 931 */ 932 dd->status = vmalloc_user(PAGE_SIZE); 933 if (!dd->status) 934 dd_dev_err(dd, "Failed to allocate dev status page\n"); 935 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 936 ppd = dd->pport + pidx; 937 if (dd->status) 938 /* Currently, we only have one port */ 939 ppd->statusp = &dd->status->port; 940 941 set_mtu(ppd); 942 } 943 944 /* enable chip even if we have an error, so we can debug cause */ 945 enable_chip(dd); 946 947 done: 948 /* 949 * Set status even if port serdes is not initialized 950 * so that diags will work. 
951 */ 952 if (dd->status) 953 dd->status->dev |= HFI1_STATUS_CHIP_PRESENT | 954 HFI1_STATUS_INITTED; 955 if (!ret) { 956 /* enable all interrupts from the chip */ 957 set_intr_state(dd, 1); 958 959 /* chip is OK for user apps; mark it as initialized */ 960 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 961 ppd = dd->pport + pidx; 962 963 /* 964 * start the serdes - must be after interrupts are 965 * enabled so we are notified when the link goes up 966 */ 967 lastfail = bringup_serdes(ppd); 968 if (lastfail) 969 dd_dev_info(dd, 970 "Failed to bring up port %u\n", 971 ppd->port); 972 973 /* 974 * Set status even if port serdes is not initialized 975 * so that diags will work. 976 */ 977 if (ppd->statusp) 978 *ppd->statusp |= HFI1_STATUS_CHIP_PRESENT | 979 HFI1_STATUS_INITTED; 980 if (!ppd->link_speed_enabled) 981 continue; 982 } 983 } 984 985 /* if ret is non-zero, we probably should do some cleanup here... */ 986 return ret; 987 } 988 989 static inline struct hfi1_devdata *__hfi1_lookup(int unit) 990 { 991 return idr_find(&hfi1_unit_table, unit); 992 } 993 994 struct hfi1_devdata *hfi1_lookup(int unit) 995 { 996 struct hfi1_devdata *dd; 997 unsigned long flags; 998 999 spin_lock_irqsave(&hfi1_devs_lock, flags); 1000 dd = __hfi1_lookup(unit); 1001 spin_unlock_irqrestore(&hfi1_devs_lock, flags); 1002 1003 return dd; 1004 } 1005 1006 /* 1007 * Stop the timers during unit shutdown, or after an error late 1008 * in initialization. 1009 */ 1010 static void stop_timers(struct hfi1_devdata *dd) 1011 { 1012 struct hfi1_pportdata *ppd; 1013 int pidx; 1014 1015 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1016 ppd = dd->pport + pidx; 1017 if (ppd->led_override_timer.function) { 1018 del_timer_sync(&ppd->led_override_timer); 1019 atomic_set(&ppd->led_override_timer_active, 0); 1020 } 1021 } 1022 } 1023 1024 /** 1025 * shutdown_device - shut down a device 1026 * @dd: the hfi1_ib device 1027 * 1028 * This is called to make the device quiet when we are about to 1029 * unload the driver, and also when the device is administratively 1030 * disabled. It does not free any data structures. 1031 * Everything it does has to be setup again by hfi1_init(dd, 1) 1032 */ 1033 static void shutdown_device(struct hfi1_devdata *dd) 1034 { 1035 struct hfi1_pportdata *ppd; 1036 struct hfi1_ctxtdata *rcd; 1037 unsigned pidx; 1038 int i; 1039 1040 if (dd->flags & HFI1_SHUTDOWN) 1041 return; 1042 dd->flags |= HFI1_SHUTDOWN; 1043 1044 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1045 ppd = dd->pport + pidx; 1046 1047 ppd->linkup = 0; 1048 if (ppd->statusp) 1049 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF | 1050 HFI1_STATUS_IB_READY); 1051 } 1052 dd->flags &= ~HFI1_INITTED; 1053 1054 /* mask and clean up interrupts, but not errors */ 1055 set_intr_state(dd, 0); 1056 hfi1_clean_up_interrupts(dd); 1057 1058 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1059 ppd = dd->pport + pidx; 1060 for (i = 0; i < dd->num_rcv_contexts; i++) { 1061 rcd = hfi1_rcd_get_by_index(dd, i); 1062 hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS | 1063 HFI1_RCVCTRL_CTXT_DIS | 1064 HFI1_RCVCTRL_INTRAVAIL_DIS | 1065 HFI1_RCVCTRL_PKEY_DIS | 1066 HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd); 1067 hfi1_rcd_put(rcd); 1068 } 1069 /* 1070 * Gracefully stop all sends allowing any in progress to 1071 * trickle out first. 1072 */ 1073 for (i = 0; i < dd->num_send_contexts; i++) 1074 sc_flush(dd->send_contexts[i].sc); 1075 } 1076 1077 /* 1078 * Enough for anything that's going to trickle out to have actually 1079 * done so. 
1080 */ 1081 udelay(20); 1082 1083 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1084 ppd = dd->pport + pidx; 1085 1086 /* disable all contexts */ 1087 for (i = 0; i < dd->num_send_contexts; i++) 1088 sc_disable(dd->send_contexts[i].sc); 1089 /* disable the send device */ 1090 pio_send_control(dd, PSC_GLOBAL_DISABLE); 1091 1092 shutdown_led_override(ppd); 1093 1094 /* 1095 * Clear SerdesEnable. 1096 * We can't count on interrupts since we are stopping. 1097 */ 1098 hfi1_quiet_serdes(ppd); 1099 1100 if (ppd->hfi1_wq) { 1101 destroy_workqueue(ppd->hfi1_wq); 1102 ppd->hfi1_wq = NULL; 1103 } 1104 if (ppd->link_wq) { 1105 destroy_workqueue(ppd->link_wq); 1106 ppd->link_wq = NULL; 1107 } 1108 } 1109 sdma_exit(dd); 1110 } 1111 1112 /** 1113 * hfi1_free_ctxtdata - free a context's allocated data 1114 * @dd: the hfi1_ib device 1115 * @rcd: the ctxtdata structure 1116 * 1117 * free up any allocated data for a context 1118 * It should never change any chip state, or global driver state. 1119 */ 1120 void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) 1121 { 1122 u32 e; 1123 1124 if (!rcd) 1125 return; 1126 1127 if (rcd->rcvhdrq) { 1128 dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd), 1129 rcd->rcvhdrq, rcd->rcvhdrq_dma); 1130 rcd->rcvhdrq = NULL; 1131 if (rcd->rcvhdrtail_kvaddr) { 1132 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, 1133 (void *)rcd->rcvhdrtail_kvaddr, 1134 rcd->rcvhdrqtailaddr_dma); 1135 rcd->rcvhdrtail_kvaddr = NULL; 1136 } 1137 } 1138 1139 /* all the RcvArray entries should have been cleared by now */ 1140 kfree(rcd->egrbufs.rcvtids); 1141 rcd->egrbufs.rcvtids = NULL; 1142 1143 for (e = 0; e < rcd->egrbufs.alloced; e++) { 1144 if (rcd->egrbufs.buffers[e].dma) 1145 dma_free_coherent(&dd->pcidev->dev, 1146 rcd->egrbufs.buffers[e].len, 1147 rcd->egrbufs.buffers[e].addr, 1148 rcd->egrbufs.buffers[e].dma); 1149 } 1150 kfree(rcd->egrbufs.buffers); 1151 rcd->egrbufs.alloced = 0; 1152 rcd->egrbufs.buffers = NULL; 1153 1154 sc_free(rcd->sc); 1155 rcd->sc = NULL; 1156 1157 vfree(rcd->subctxt_uregbase); 1158 vfree(rcd->subctxt_rcvegrbuf); 1159 vfree(rcd->subctxt_rcvhdr_base); 1160 kfree(rcd->opstats); 1161 1162 rcd->subctxt_uregbase = NULL; 1163 rcd->subctxt_rcvegrbuf = NULL; 1164 rcd->subctxt_rcvhdr_base = NULL; 1165 rcd->opstats = NULL; 1166 } 1167 1168 /* 1169 * Release our hold on the shared asic data. If we are the last one, 1170 * return the structure to be finalized outside the lock. Must be 1171 * holding hfi1_devs_lock. 1172 */ 1173 static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd) 1174 { 1175 struct hfi1_asic_data *ad; 1176 int other; 1177 1178 if (!dd->asic_data) 1179 return NULL; 1180 dd->asic_data->dds[dd->hfi1_id] = NULL; 1181 other = dd->hfi1_id ? 0 : 1; 1182 ad = dd->asic_data; 1183 dd->asic_data = NULL; 1184 /* return NULL if the other dd still has a link */ 1185 return ad->dds[other] ? NULL : ad; 1186 } 1187 1188 static void finalize_asic_data(struct hfi1_devdata *dd, 1189 struct hfi1_asic_data *ad) 1190 { 1191 clean_up_i2c(dd, ad); 1192 kfree(ad); 1193 } 1194 1195 /** 1196 * hfi1_clean_devdata - cleans up per-unit data structure 1197 * @dd: pointer to a valid devdata structure 1198 * 1199 * It cleans up all data structures set up by 1200 * by hfi1_alloc_devdata(). 
1201 */ 1202 static void hfi1_clean_devdata(struct hfi1_devdata *dd) 1203 { 1204 struct hfi1_asic_data *ad; 1205 unsigned long flags; 1206 1207 spin_lock_irqsave(&hfi1_devs_lock, flags); 1208 if (!list_empty(&dd->list)) { 1209 idr_remove(&hfi1_unit_table, dd->unit); 1210 list_del_init(&dd->list); 1211 } 1212 ad = release_asic_data(dd); 1213 spin_unlock_irqrestore(&hfi1_devs_lock, flags); 1214 1215 finalize_asic_data(dd, ad); 1216 free_platform_config(dd); 1217 rcu_barrier(); /* wait for rcu callbacks to complete */ 1218 free_percpu(dd->int_counter); 1219 free_percpu(dd->rcv_limit); 1220 free_percpu(dd->send_schedule); 1221 free_percpu(dd->tx_opstats); 1222 dd->int_counter = NULL; 1223 dd->rcv_limit = NULL; 1224 dd->send_schedule = NULL; 1225 dd->tx_opstats = NULL; 1226 kfree(dd->comp_vect); 1227 dd->comp_vect = NULL; 1228 sdma_clean(dd, dd->num_sdma); 1229 rvt_dealloc_device(&dd->verbs_dev.rdi); 1230 } 1231 1232 static void __hfi1_free_devdata(struct kobject *kobj) 1233 { 1234 struct hfi1_devdata *dd = 1235 container_of(kobj, struct hfi1_devdata, kobj); 1236 1237 hfi1_clean_devdata(dd); 1238 } 1239 1240 static struct kobj_type hfi1_devdata_type = { 1241 .release = __hfi1_free_devdata, 1242 }; 1243 1244 void hfi1_free_devdata(struct hfi1_devdata *dd) 1245 { 1246 kobject_put(&dd->kobj); 1247 } 1248 1249 /* 1250 * Allocate our primary per-unit data structure. Must be done via verbs 1251 * allocator, because the verbs cleanup process both does cleanup and 1252 * free of the data structure. 1253 * "extra" is for chip-specific data. 1254 * 1255 * Use the idr mechanism to get a unit number for this unit. 1256 */ 1257 struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) 1258 { 1259 unsigned long flags; 1260 struct hfi1_devdata *dd; 1261 int ret, nports; 1262 1263 /* extra is * number of ports */ 1264 nports = extra / sizeof(struct hfi1_pportdata); 1265 1266 dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra, 1267 nports); 1268 if (!dd) 1269 return ERR_PTR(-ENOMEM); 1270 dd->num_pports = nports; 1271 dd->pport = (struct hfi1_pportdata *)(dd + 1); 1272 dd->pcidev = pdev; 1273 pci_set_drvdata(pdev, dd); 1274 1275 INIT_LIST_HEAD(&dd->list); 1276 idr_preload(GFP_KERNEL); 1277 spin_lock_irqsave(&hfi1_devs_lock, flags); 1278 1279 ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT); 1280 if (ret >= 0) { 1281 dd->unit = ret; 1282 list_add(&dd->list, &hfi1_dev_list); 1283 } 1284 dd->node = -1; 1285 1286 spin_unlock_irqrestore(&hfi1_devs_lock, flags); 1287 idr_preload_end(); 1288 1289 if (ret < 0) { 1290 hfi1_early_err(&pdev->dev, 1291 "Could not allocate unit ID: error %d\n", -ret); 1292 goto bail; 1293 } 1294 rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit); 1295 1296 /* 1297 * Initialize all locks for the device. This needs to be as early as 1298 * possible so locks are usable. 
1299 */ 1300 spin_lock_init(&dd->sc_lock); 1301 spin_lock_init(&dd->sendctrl_lock); 1302 spin_lock_init(&dd->rcvctrl_lock); 1303 spin_lock_init(&dd->uctxt_lock); 1304 spin_lock_init(&dd->hfi1_diag_trans_lock); 1305 spin_lock_init(&dd->sc_init_lock); 1306 spin_lock_init(&dd->dc8051_memlock); 1307 seqlock_init(&dd->sc2vl_lock); 1308 spin_lock_init(&dd->sde_map_lock); 1309 spin_lock_init(&dd->pio_map_lock); 1310 mutex_init(&dd->dc8051_lock); 1311 init_waitqueue_head(&dd->event_queue); 1312 1313 dd->int_counter = alloc_percpu(u64); 1314 if (!dd->int_counter) { 1315 ret = -ENOMEM; 1316 goto bail; 1317 } 1318 1319 dd->rcv_limit = alloc_percpu(u64); 1320 if (!dd->rcv_limit) { 1321 ret = -ENOMEM; 1322 goto bail; 1323 } 1324 1325 dd->send_schedule = alloc_percpu(u64); 1326 if (!dd->send_schedule) { 1327 ret = -ENOMEM; 1328 goto bail; 1329 } 1330 1331 dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx); 1332 if (!dd->tx_opstats) { 1333 ret = -ENOMEM; 1334 goto bail; 1335 } 1336 1337 dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL); 1338 if (!dd->comp_vect) { 1339 ret = -ENOMEM; 1340 goto bail; 1341 } 1342 1343 kobject_init(&dd->kobj, &hfi1_devdata_type); 1344 return dd; 1345 1346 bail: 1347 hfi1_clean_devdata(dd); 1348 return ERR_PTR(ret); 1349 } 1350 1351 /* 1352 * Called from freeze mode handlers, and from PCI error 1353 * reporting code. Should be paranoid about state of 1354 * system and data structures. 1355 */ 1356 void hfi1_disable_after_error(struct hfi1_devdata *dd) 1357 { 1358 if (dd->flags & HFI1_INITTED) { 1359 u32 pidx; 1360 1361 dd->flags &= ~HFI1_INITTED; 1362 if (dd->pport) 1363 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1364 struct hfi1_pportdata *ppd; 1365 1366 ppd = dd->pport + pidx; 1367 if (dd->flags & HFI1_PRESENT) 1368 set_link_state(ppd, HLS_DN_DISABLE); 1369 1370 if (ppd->statusp) 1371 *ppd->statusp &= ~HFI1_STATUS_IB_READY; 1372 } 1373 } 1374 1375 /* 1376 * Mark as having had an error for driver, and also 1377 * for /sys and status word mapped to user programs. 1378 * This marks unit as not usable, until reset. 1379 */ 1380 if (dd->status) 1381 dd->status->dev |= HFI1_STATUS_HWERROR; 1382 } 1383 1384 static void remove_one(struct pci_dev *); 1385 static int init_one(struct pci_dev *, const struct pci_device_id *); 1386 static void shutdown_one(struct pci_dev *); 1387 1388 #define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: " 1389 #define PFX DRIVER_NAME ": " 1390 1391 const struct pci_device_id hfi1_pci_tbl[] = { 1392 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) }, 1393 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) }, 1394 { 0, } 1395 }; 1396 1397 MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl); 1398 1399 static struct pci_driver hfi1_pci_driver = { 1400 .name = DRIVER_NAME, 1401 .probe = init_one, 1402 .remove = remove_one, 1403 .shutdown = shutdown_one, 1404 .id_table = hfi1_pci_tbl, 1405 .err_handler = &hfi1_pci_err_handler, 1406 }; 1407 1408 static void __init compute_krcvqs(void) 1409 { 1410 int i; 1411 1412 for (i = 0; i < krcvqsset; i++) 1413 n_krcvqs += krcvqs[i]; 1414 } 1415 1416 /* 1417 * Do all the generic driver unit- and chip-independent memory 1418 * allocation and initialization. 
1419 */ 1420 static int __init hfi1_mod_init(void) 1421 { 1422 int ret; 1423 1424 ret = dev_init(); 1425 if (ret) 1426 goto bail; 1427 1428 ret = node_affinity_init(); 1429 if (ret) 1430 goto bail; 1431 1432 /* validate max MTU before any devices start */ 1433 if (!valid_opa_max_mtu(hfi1_max_mtu)) { 1434 pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n", 1435 hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU); 1436 hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU; 1437 } 1438 /* valid CUs run from 1-128 in powers of 2 */ 1439 if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu)) 1440 hfi1_cu = 1; 1441 /* valid credit return threshold is 0-100, variable is unsigned */ 1442 if (user_credit_return_threshold > 100) 1443 user_credit_return_threshold = 100; 1444 1445 compute_krcvqs(); 1446 /* 1447 * sanitize receive interrupt count, time must wait until after 1448 * the hardware type is known 1449 */ 1450 if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK) 1451 rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK; 1452 /* reject invalid combinations */ 1453 if (rcv_intr_count == 0 && rcv_intr_timeout == 0) { 1454 pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n"); 1455 rcv_intr_count = 1; 1456 } 1457 if (rcv_intr_count > 1 && rcv_intr_timeout == 0) { 1458 /* 1459 * Avoid indefinite packet delivery by requiring a timeout 1460 * if count is > 1. 1461 */ 1462 pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n"); 1463 rcv_intr_timeout = 1; 1464 } 1465 if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) { 1466 /* 1467 * The dynamic algorithm expects a non-zero timeout 1468 * and a count > 1. 1469 */ 1470 pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n"); 1471 rcv_intr_dynamic = 0; 1472 } 1473 1474 /* sanitize link CRC options */ 1475 link_crc_mask &= SUPPORTED_CRCS; 1476 1477 /* 1478 * These must be called before the driver is registered with 1479 * the PCI subsystem. 1480 */ 1481 idr_init(&hfi1_unit_table); 1482 1483 hfi1_dbg_init(); 1484 ret = hfi1_wss_init(); 1485 if (ret < 0) 1486 goto bail_wss; 1487 ret = pci_register_driver(&hfi1_pci_driver); 1488 if (ret < 0) { 1489 pr_err("Unable to register driver: error %d\n", -ret); 1490 goto bail_dev; 1491 } 1492 goto bail; /* all OK */ 1493 1494 bail_dev: 1495 hfi1_wss_exit(); 1496 bail_wss: 1497 hfi1_dbg_exit(); 1498 idr_destroy(&hfi1_unit_table); 1499 dev_cleanup(); 1500 bail: 1501 return ret; 1502 } 1503 1504 module_init(hfi1_mod_init); 1505 1506 /* 1507 * Do the non-unit driver cleanup, memory free, etc. at unload. 
1508 */ 1509 static void __exit hfi1_mod_cleanup(void) 1510 { 1511 pci_unregister_driver(&hfi1_pci_driver); 1512 node_affinity_destroy_all(); 1513 hfi1_wss_exit(); 1514 hfi1_dbg_exit(); 1515 1516 idr_destroy(&hfi1_unit_table); 1517 dispose_firmware(); /* asymmetric with obtain_firmware() */ 1518 dev_cleanup(); 1519 } 1520 1521 module_exit(hfi1_mod_cleanup); 1522 1523 /* this can only be called after a successful initialization */ 1524 static void cleanup_device_data(struct hfi1_devdata *dd) 1525 { 1526 int ctxt; 1527 int pidx; 1528 1529 /* users can't do anything more with chip */ 1530 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1531 struct hfi1_pportdata *ppd = &dd->pport[pidx]; 1532 struct cc_state *cc_state; 1533 int i; 1534 1535 if (ppd->statusp) 1536 *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT; 1537 1538 for (i = 0; i < OPA_MAX_SLS; i++) 1539 hrtimer_cancel(&ppd->cca_timer[i].hrtimer); 1540 1541 spin_lock(&ppd->cc_state_lock); 1542 cc_state = get_cc_state_protected(ppd); 1543 RCU_INIT_POINTER(ppd->cc_state, NULL); 1544 spin_unlock(&ppd->cc_state_lock); 1545 1546 if (cc_state) 1547 kfree_rcu(cc_state, rcu); 1548 } 1549 1550 free_credit_return(dd); 1551 1552 if (dd->rcvhdrtail_dummy_kvaddr) { 1553 dma_free_coherent(&dd->pcidev->dev, sizeof(u64), 1554 (void *)dd->rcvhdrtail_dummy_kvaddr, 1555 dd->rcvhdrtail_dummy_dma); 1556 dd->rcvhdrtail_dummy_kvaddr = NULL; 1557 } 1558 1559 /* 1560 * Free any resources still in use (usually just kernel contexts) 1561 * at unload; we do for ctxtcnt, because that's what we allocate. 1562 */ 1563 for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) { 1564 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt]; 1565 1566 if (rcd) { 1567 hfi1_clear_tids(rcd); 1568 hfi1_free_ctxt(rcd); 1569 } 1570 } 1571 1572 kfree(dd->rcd); 1573 dd->rcd = NULL; 1574 1575 free_pio_map(dd); 1576 /* must follow rcv context free - need to remove rcv's hooks */ 1577 for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++) 1578 sc_free(dd->send_contexts[ctxt].sc); 1579 dd->num_send_contexts = 0; 1580 kfree(dd->send_contexts); 1581 dd->send_contexts = NULL; 1582 kfree(dd->hw_to_sw); 1583 dd->hw_to_sw = NULL; 1584 kfree(dd->boardname); 1585 vfree(dd->events); 1586 vfree(dd->status); 1587 } 1588 1589 /* 1590 * Clean up on unit shutdown, or error during unit load after 1591 * successful initialization. 
1592 */ 1593 static void postinit_cleanup(struct hfi1_devdata *dd) 1594 { 1595 hfi1_start_cleanup(dd); 1596 hfi1_comp_vectors_clean_up(dd); 1597 hfi1_dev_affinity_clean_up(dd); 1598 1599 hfi1_pcie_ddcleanup(dd); 1600 hfi1_pcie_cleanup(dd->pcidev); 1601 1602 cleanup_device_data(dd); 1603 1604 hfi1_free_devdata(dd); 1605 } 1606 1607 static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt) 1608 { 1609 if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { 1610 hfi1_early_err(dev, "Receive header queue count too small\n"); 1611 return -EINVAL; 1612 } 1613 1614 if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { 1615 hfi1_early_err(dev, 1616 "Receive header queue count cannot be greater than %u\n", 1617 HFI1_MAX_HDRQ_EGRBUF_CNT); 1618 return -EINVAL; 1619 } 1620 1621 if (thecnt % HDRQ_INCREMENT) { 1622 hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n", 1623 thecnt, HDRQ_INCREMENT); 1624 return -EINVAL; 1625 } 1626 1627 return 0; 1628 } 1629 1630 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1631 { 1632 int ret = 0, j, pidx, initfail; 1633 struct hfi1_devdata *dd; 1634 struct hfi1_pportdata *ppd; 1635 1636 /* First, lock the non-writable module parameters */ 1637 HFI1_CAP_LOCK(); 1638 1639 /* Validate dev ids */ 1640 if (!(ent->device == PCI_DEVICE_ID_INTEL0 || 1641 ent->device == PCI_DEVICE_ID_INTEL1)) { 1642 hfi1_early_err(&pdev->dev, 1643 "Failing on unknown Intel deviceid 0x%x\n", 1644 ent->device); 1645 ret = -ENODEV; 1646 goto bail; 1647 } 1648 1649 /* Validate some global module parameters */ 1650 ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt); 1651 if (ret) 1652 goto bail; 1653 1654 /* use the encoding function as a sanitization check */ 1655 if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) { 1656 hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n", 1657 hfi1_hdrq_entsize); 1658 ret = -EINVAL; 1659 goto bail; 1660 } 1661 1662 /* The receive eager buffer size must be set before the receive 1663 * contexts are created. 1664 * 1665 * Set the eager buffer size. Validate that it falls in a range 1666 * allowed by the hardware - all powers of 2 between the min and 1667 * max. The maximum valid MTU is within the eager buffer range 1668 * so we do not need to cap the max_mtu by an eager buffer size 1669 * setting. 1670 */ 1671 if (eager_buffer_size) { 1672 if (!is_power_of_2(eager_buffer_size)) 1673 eager_buffer_size = 1674 roundup_pow_of_two(eager_buffer_size); 1675 eager_buffer_size = 1676 clamp_val(eager_buffer_size, 1677 MIN_EAGER_BUFFER * 8, 1678 MAX_EAGER_BUFFER_TOTAL); 1679 hfi1_early_info(&pdev->dev, "Eager buffer size %u\n", 1680 eager_buffer_size); 1681 } else { 1682 hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n"); 1683 ret = -EINVAL; 1684 goto bail; 1685 } 1686 1687 /* restrict value of hfi1_rcvarr_split */ 1688 hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100); 1689 1690 ret = hfi1_pcie_init(pdev, ent); 1691 if (ret) 1692 goto bail; 1693 1694 /* 1695 * Do device-specific initialization, function table setup, dd 1696 * allocation, etc. 1697 */ 1698 dd = hfi1_init_dd(pdev, ent); 1699 1700 if (IS_ERR(dd)) { 1701 ret = PTR_ERR(dd); 1702 goto clean_bail; /* error already printed */ 1703 } 1704 1705 ret = create_workqueues(dd); 1706 if (ret) 1707 goto clean_bail; 1708 1709 /* do the generic initialization */ 1710 initfail = hfi1_init(dd, 0); 1711 1712 /* setup vnic */ 1713 hfi1_vnic_setup(dd); 1714 1715 ret = hfi1_register_ib_device(dd); 1716 1717 /* 1718 * Now ready for use. 
This should be cleared whenever we
         * detect a reset, or initiate one. If earlier failure,
         * we still create devices, so diags, etc. can be used
         * to determine cause of problem.
         */
        if (!initfail && !ret) {
                dd->flags |= HFI1_INITTED;
                /* create debugfs files after init and ib register */
                hfi1_dbg_ibdev_init(&dd->verbs_dev);
        }

        j = hfi1_device_create(dd);
        if (j)
                dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

        if (initfail || ret) {
                hfi1_clean_up_interrupts(dd);
                stop_timers(dd);
                flush_workqueue(ib_wq);
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        hfi1_quiet_serdes(dd->pport + pidx);
                        ppd = dd->pport + pidx;
                        if (ppd->hfi1_wq) {
                                destroy_workqueue(ppd->hfi1_wq);
                                ppd->hfi1_wq = NULL;
                        }
                        if (ppd->link_wq) {
                                destroy_workqueue(ppd->link_wq);
                                ppd->link_wq = NULL;
                        }
                }
                if (!j)
                        hfi1_device_remove(dd);
                if (!ret)
                        hfi1_unregister_ib_device(dd);
                hfi1_vnic_cleanup(dd);
                postinit_cleanup(dd);
                if (initfail)
                        ret = initfail;
                goto bail; /* everything already cleaned */
        }

        sdma_start(dd);

        return 0;

clean_bail:
        hfi1_pcie_cleanup(pdev);
bail:
        return ret;
}

static void wait_for_clients(struct hfi1_devdata *dd)
{
        /*
         * Remove the device init value and complete the device if there
         * are no clients, or wait for active clients to finish.
         */
        if (atomic_dec_and_test(&dd->user_refcount))
                complete(&dd->user_comp);

        wait_for_completion(&dd->user_comp);
}

static void remove_one(struct pci_dev *pdev)
{
        struct hfi1_devdata *dd = pci_get_drvdata(pdev);

        /* close debugfs files before ib unregister */
        hfi1_dbg_ibdev_exit(&dd->verbs_dev);

        /* remove the /dev hfi1 interface */
        hfi1_device_remove(dd);

        /* wait for existing user space clients to finish */
        wait_for_clients(dd);

        /* unregister from IB core */
        hfi1_unregister_ib_device(dd);

        /* cleanup vnic */
        hfi1_vnic_cleanup(dd);

        /*
         * Disable the IB link, disable interrupts on the device,
         * clear dma engines, etc.
         */
        shutdown_device(dd);

        stop_timers(dd);

        /* wait until all of our (qsfp) queue_work() calls complete */
        flush_workqueue(ib_wq);

        postinit_cleanup(dd);
}

static void shutdown_one(struct pci_dev *pdev)
{
        struct hfi1_devdata *dd = pci_get_drvdata(pdev);

        shutdown_device(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
        unsigned amt;
        u64 reg;

        if (!rcd->rcvhdrq) {
                gfp_t gfp_flags;

                amt = rcvhdrq_size(rcd);

                if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
                        gfp_flags = GFP_KERNEL;
                else
                        gfp_flags = GFP_USER;
                rcd->rcvhdrq = dma_zalloc_coherent(
                        &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
                        gfp_flags | __GFP_COMP);

                if (!rcd->rcvhdrq) {
                        dd_dev_err(dd,
                                   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
                                   amt, rcd->ctxt);
                        goto bail;
                }

                if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
                    HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
                        rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
                                &dd->pcidev->dev, PAGE_SIZE,
                                &rcd->rcvhdrqtailaddr_dma, gfp_flags);
                        if (!rcd->rcvhdrtail_kvaddr)
                                goto bail_free;
                }
        }
        /*
         * These values are per-context:
         *      RcvHdrCnt
         *      RcvHdrEntSize
         *      RcvHdrSize
         */
        reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
                        & RCV_HDR_CNT_CNT_MASK)
                << RCV_HDR_CNT_CNT_SHIFT;
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
        reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
                        & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
                << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
        reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK)
                << RCV_HDR_SIZE_HDR_SIZE_SHIFT;
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);

        /*
         * Program dummy tail address for every receive context
         * before enabling any receive context
         */
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
                        dd->rcvhdrtail_dummy_dma);

        return 0;

bail_free:
        dd_dev_err(dd,
                   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
                   rcd->ctxt);
        dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
                          rcd->rcvhdrq_dma);
        rcd->rcvhdrq = NULL;
bail:
        return -ENOMEM;
}

/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous; we do multiple allocation
 * calls. Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
        struct hfi1_devdata *dd = rcd->dd;
        u32 max_entries, egrtop, alloced_bytes = 0;
        gfp_t gfp_flags;
        u16 order, idx = 0;
        int ret = 0;
        u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

        /*
         * GFP_USER, but without GFP_FS, so buffer cache can be
         * coalesced (we hope); otherwise, even at order 4,
         * heavy filesystem activity makes these fail, and we can
         * use compound pages.
         */
        gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

        /*
         * The minimum size of the eager buffers is a group of MTU-sized
         * buffers.
         * The global eager_buffer_size parameter is checked against the
         * theoretical lower limit of the value. Here, we check against the
         * MTU.
         */
        if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
                rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
        /*
         * If using one-pkt-per-egr-buffer, lower the eager buffer
         * size to the max MTU (page-aligned).
         */
        if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
                rcd->egrbufs.rcvtid_size = round_mtu;

        /*
         * Eager buffers sizes of 1MB or less require smaller TID sizes
         * to satisfy the "multiple of 8 RcvArray entries" requirement.
         */
        if (rcd->egrbufs.size <= (1 << 20))
                rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
                        rounddown_pow_of_two(rcd->egrbufs.size / 8));

        while (alloced_bytes < rcd->egrbufs.size &&
               rcd->egrbufs.alloced < rcd->egrbufs.count) {
                rcd->egrbufs.buffers[idx].addr =
                        dma_zalloc_coherent(&dd->pcidev->dev,
                                            rcd->egrbufs.rcvtid_size,
                                            &rcd->egrbufs.buffers[idx].dma,
                                            gfp_flags);
                if (rcd->egrbufs.buffers[idx].addr) {
                        rcd->egrbufs.buffers[idx].len =
                                rcd->egrbufs.rcvtid_size;
                        rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
                                rcd->egrbufs.buffers[idx].addr;
                        rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
                                rcd->egrbufs.buffers[idx].dma;
                        rcd->egrbufs.alloced++;
                        alloced_bytes += rcd->egrbufs.rcvtid_size;
                        idx++;
                } else {
                        u32 new_size, i, j;
                        u64 offset = 0;

                        /*
                         * Fail the eager buffer allocation if:
                         *   - we are already using the lowest acceptable size
                         *   - we are using one-pkt-per-egr-buffer (this implies
                         *     that we are accepting only one size)
                         */
                        if (rcd->egrbufs.rcvtid_size == round_mtu ||
                            !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
                                dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
                                           rcd->ctxt);
                                ret = -ENOMEM;
                                goto bail_rcvegrbuf_phys;
                        }

                        new_size = rcd->egrbufs.rcvtid_size / 2;

                        /*
                         * If the first attempt to allocate memory failed, don't
                         * fail everything but continue with the next lower
                         * size.
                         */
                        if (idx == 0) {
                                rcd->egrbufs.rcvtid_size = new_size;
                                continue;
                        }

                        /*
                         * Re-partition already allocated buffers to a smaller
                         * size.
                         */
                        rcd->egrbufs.alloced = 0;
                        for (i = 0, j = 0, offset = 0; j < idx; i++) {
                                if (i >= rcd->egrbufs.count)
                                        break;
                                rcd->egrbufs.rcvtids[i].dma =
                                        rcd->egrbufs.buffers[j].dma + offset;
                                rcd->egrbufs.rcvtids[i].addr =
                                        rcd->egrbufs.buffers[j].addr + offset;
                                rcd->egrbufs.alloced++;
                                if ((rcd->egrbufs.buffers[j].dma + offset +
                                     new_size) ==
                                    (rcd->egrbufs.buffers[j].dma +
                                     rcd->egrbufs.buffers[j].len)) {
                                        j++;
                                        offset = 0;
                                } else {
                                        offset += new_size;
                                }
                        }
                        rcd->egrbufs.rcvtid_size = new_size;
                }
        }
        rcd->egrbufs.numbufs = idx;
        rcd->egrbufs.size = alloced_bytes;

        hfi1_cdbg(PROC,
                  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
                  rcd->ctxt, rcd->egrbufs.alloced,
                  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

        /*
         * Set the context's rcv array head update threshold to the closest
         * power of 2 (so we can use a mask instead of modulo) below half
         * the allocated entries.
         */
        rcd->egrbufs.threshold =
                rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
        /*
         * Compute the expected RcvArray entry base. This is done after
         * allocating the eager buffers in order to maximize the
         * expected RcvArray entries for the context.
         */
        max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
        egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
        rcd->expected_count = max_entries - egrtop;
        if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
                rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

        rcd->expected_base = rcd->eager_base + egrtop;
        hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
                  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
                  rcd->eager_base, rcd->expected_base);

        if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
                hfi1_cdbg(PROC,
                          "ctxt%u: current Eager buffer size is invalid %u\n",
                          rcd->ctxt, rcd->egrbufs.rcvtid_size);
                ret = -EINVAL;
                goto bail_rcvegrbuf_phys;
        }

        for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
                hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
                             rcd->egrbufs.rcvtids[idx].dma, order);
                cond_resched();
        }

        return 0;

bail_rcvegrbuf_phys:
        for (idx = 0; idx < rcd->egrbufs.alloced &&
             rcd->egrbufs.buffers[idx].addr;
             idx++) {
                dma_free_coherent(&dd->pcidev->dev,
                                  rcd->egrbufs.buffers[idx].len,
                                  rcd->egrbufs.buffers[idx].addr,
                                  rcd->egrbufs.buffers[idx].dma);
                rcd->egrbufs.buffers[idx].addr = NULL;
                rcd->egrbufs.buffers[idx].dma = 0;
                rcd->egrbufs.buffers[idx].len = 0;
        }

        return ret;
}