/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

int c4iw_wr_log = 0;
module_param(c4iw_wr_log, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");

static int c4iw_wr_log_size_order = 12;
module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
		 "Number of entries (log2) in the work request timing log.");

struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0

static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
{
	struct wr_log_entry le;
	int idx;

	if (!wq->rdev->wr_log)
		return;

	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
		(wq->rdev->wr_log_size - 1);
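	/*
	 * Snapshot both clock domains for this completion: the SGE
	 * timestamp in hardware ticks and the host wallclock time.
	 * wr_log_show() correlates the two, converting tick deltas to
	 * nanoseconds via the core clock period (cclk_ps).
	 */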
	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
	getnstimeofday(&le.poll_host_ts);
	le.valid = 1;
	le.cqe_sge_ts = CQE_TS(cqe);
	if (SQ_TYPE(cqe)) {
		le.qid = wq->sq.qid;
		le.opcode = CQE_OPCODE(cqe);
		le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
		le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
		le.wr_id = CQE_WRID_SQ_IDX(cqe);
	} else {
		le.qid = wq->rq.qid;
		le.opcode = FW_RI_RECEIVE;
		le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
		le.wr_id = CQE_WRID_MSN(cqe);
	}
	wq->rdev->wr_log[idx] = le;
}

static int wr_log_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;
	struct timespec prev_ts = {0, 0};
	struct wr_log_entry *lep;
	int prev_ts_set = 0;
	int idx, end;

#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)

	idx = atomic_read(&dev->rdev.wr_log_idx) &
		(dev->rdev.wr_log_size - 1);
	end = idx - 1;
	if (end < 0)
		end = dev->rdev.wr_log_size - 1;
	lep = &dev->rdev.wr_log[idx];
	while (idx != end) {
		if (lep->valid) {
			if (!prev_ts_set) {
				prev_ts_set = 1;
				prev_ts = lep->poll_host_ts;
			}
			seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
				   "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
				   "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
				   "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
				   "cqe_poll_delta_ns %llu\n",
				   idx,
				   timespec_sub(lep->poll_host_ts,
						prev_ts).tv_sec,
				   timespec_sub(lep->poll_host_ts,
						prev_ts).tv_nsec,
				   lep->qid, lep->opcode,
				   lep->opcode == FW_RI_RECEIVE ?
						  "msn" : "wrid",
				   lep->wr_id,
				   timespec_sub(lep->poll_host_ts,
						lep->post_host_ts).tv_sec,
				   timespec_sub(lep->poll_host_ts,
						lep->post_host_ts).tv_nsec,
				   lep->post_sge_ts, lep->cqe_sge_ts,
				   lep->poll_sge_ts,
				   ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
				   ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
			prev_ts = lep->poll_host_ts;
		}
		idx++;
		if (idx > (dev->rdev.wr_log_size - 1))
			idx = 0;
		lep = &dev->rdev.wr_log[idx];
	}
#undef ts2ns
	return 0;
}

static int wr_log_open(struct inode *inode, struct file *file)
{
	return single_open(file, wr_log_show, inode->i_private);
}

static ssize_t wr_log_clear(struct file *file, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
	int i;

	if (dev->rdev.wr_log)
		for (i = 0; i < dev->rdev.wr_log_size; i++)
			dev->rdev.wr_log[i].valid = 0;
	return count;
}

static const struct file_operations wr_log_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = wr_log_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = wr_log_clear,
};

static struct sockaddr_in zero_sin = {
	.sin_family = AF_INET,
};

static struct sockaddr_in6 zero_sin6 = {
	.sin6_family = AF_INET6,
};

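/*
 * If an endpoint's cm_id has already been released there are no mapped
 * addresses to report, so the helpers below fall back to these zeroed
 * sockaddrs instead of dereferencing a stale pointer.
 */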
static void set_ep_sin_addrs(struct c4iw_ep *ep,
			     struct sockaddr_in **lsin,
			     struct sockaddr_in **rsin,
			     struct sockaddr_in **m_lsin,
			     struct sockaddr_in **m_rsin)
{
	struct iw_cm_id *id = ep->com.cm_id;

	*lsin = (struct sockaddr_in *)&ep->com.local_addr;
	*rsin = (struct sockaddr_in *)&ep->com.remote_addr;
	if (id) {
		*m_lsin = (struct sockaddr_in *)&id->m_local_addr;
		*m_rsin = (struct sockaddr_in *)&id->m_remote_addr;
	} else {
		*m_lsin = &zero_sin;
		*m_rsin = &zero_sin;
	}
}

static void set_ep_sin6_addrs(struct c4iw_ep *ep,
			      struct sockaddr_in6 **lsin6,
			      struct sockaddr_in6 **rsin6,
			      struct sockaddr_in6 **m_lsin6,
			      struct sockaddr_in6 **m_rsin6)
{
	struct iw_cm_id *id = ep->com.cm_id;

	*lsin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	*rsin6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
	if (id) {
		*m_lsin6 = (struct sockaddr_in6 *)&id->m_local_addr;
		*m_rsin6 = (struct sockaddr_in6 *)&id->m_remote_addr;
	} else {
		*m_lsin6 = &zero_sin6;
		*m_rsin6 = &zero_sin6;
	}
}

static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		struct c4iw_ep *ep = qp->ep;

		if (ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin;
			struct sockaddr_in *rsin;
			struct sockaddr_in *m_lsin;
			struct sockaddr_in *m_rsin;

			set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      ep->hwtid, (int)ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(m_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(m_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6;
			struct sockaddr_in6 *rsin6;
			struct sockaddr_in6 *m_lsin6;
			struct sockaddr_in6 *m_rsin6;

			set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6,
					  &m_rsin6);
			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      ep->hwtid, (int)ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(m_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(m_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		pr_info("%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd)
		return -ENOMEM;

	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 180;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		kfree(qpd);
		return -ENOMEM;
	}

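	/*
	 * Two-pass dump: size the buffer from a count taken under the lock,
	 * drop the lock to vmalloc() (which may sleep), then dump under the
	 * lock again.  If QPs were added in between, dump_qp() simply stops
	 * once the buffer fills.
	 */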
	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	return 0;
}

static const struct file_operations qp_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = qp_open,
	.release = qp_release,
	.read = debugfs_read,
	.llseek = default_llseek,
};

static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;
	struct fw_ri_tpte tpte;
	int ret;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id << 8,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&stagd->devp->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return ret;
	}
	cc = snprintf(stagd->buf + stagd->pos, space,
		      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
		      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		      (u32)id << 8,
		      FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		      FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		pr_info("%s null stagd?\n", __func__);
		return 0;
	}
	vfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * 256;
	stagd->buf = vmalloc(stagd->bufsize);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = stag_open,
	.release = stag_release,
	.read = debugfs_read,
	.llseek = default_llseek,
};

static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};

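/*
 * "stats" prints one row per rdev resource (total/current/max/fail) plus
 * the doorbell and offload-connection counters.  Writing anything to the
 * file resets the max and fail counts via stats_clear().
 */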
static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
		   dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
		   dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
		   dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
		   dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
		   dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
		   dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = stats_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = stats_clear,
};

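/*
 * Endpoint dumps show both the kernel-visible addresses and the mapped
 * (m_*) addresses from the cm_id; the two can differ when an iWARP
 * port-mapper service has remapped the ports.
 */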
static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin;
		struct sockaddr_in *rsin;
		struct sockaddr_in *m_lsin;
		struct sockaddr_in *m_rsin;

		set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(m_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(m_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6;
		struct sockaddr_in6 *rsin6;
		struct sockaddr_in6 *m_lsin6;
		struct sockaddr_in6 *m_rsin6;

		set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6, &m_rsin6);
		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(m_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(m_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in *m_lsin = (struct sockaddr_in *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(m_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in6 *m_lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(m_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;

	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 240;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

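	/*
	 * Walk all three endpoint IDRs: hwtid_idr (offloaded connections),
	 * atid_idr (active opens still in progress) and stid_idr
	 * (listening endpoints).
	 */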
	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = ep_open,
	.release = ep_release,
	.read = debugfs_read,
};

static int setup_debugfs(struct c4iw_dev *devp)
{
	if (!devp->debugfs_root)
		return -1;

	debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops, 4096);

	debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops, 4096);

	debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops, 4096);

	debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops, 4096);

	if (c4iw_wr_log)
		debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root,
					 (void *)devp, &wr_log_debugfs_fops,
					 4096);
	return 0;
}

void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		/*
		 * Only the base qid of each udb_density-sized group came
		 * from the qid table, so only that one is returned to it;
		 * the stats account for the whole group.
		 */
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * This implementation assumes udb_density == ucq_density! Eventually
	 * we might need to support this but for now fail the open. Also the
	 * cqid and qpid range must match for now.
	 */
	if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
		pr_err("%s: unsupported udb/ucq densities %u/%u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
		       rdev->lldi.ucq_density);
		return -EINVAL;
	}
	if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
	    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
		pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
		       rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
		       rdev->lldi.vr->cq.size);
		return -EINVAL;
	}

	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqmask = rdev->lldi.ucq_density - 1;
	pr_debug("%s dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
		 __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
		 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
		 rdev->lldi.vr->pbl.start,
		 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
		 rdev->lldi.vr->rq.size,
		 rdev->lldi.vr->qp.start,
		 rdev->lldi.vr->qp.size,
		 rdev->lldi.vr->cq.start,
		 rdev->lldi.vr->cq.size);
	pr_debug("udb %pR db_reg %p gts_reg %p qpmask 0x%x cqmask 0x%x\n",
		 &rdev->lldi.pdev->resource[2],
		 rdev->lldi.db_reg, rdev->lldi.gts_reg,
		 rdev->qpmask, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0)
		return -EINVAL;

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		pr_err("error %d initializing resources\n", err);
		return err;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		pr_err("error %d initializing pbl pool\n", err);
		goto destroy_resource;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		pr_err("error %d initializing rqt pool\n", err);
		goto destroy_pblpool;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		pr_err("error %d initializing ocqp pool\n", err);
		goto destroy_rqtpool;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		err = -ENOMEM;
		goto destroy_ocqp_pool;
	}
	rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
	rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
	rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
	rdev->status_page->cq_size = rdev->lldi.vr->cq.size;

	if (c4iw_wr_log) {
		rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
				       sizeof(*rdev->wr_log), GFP_KERNEL);
		if (rdev->wr_log) {
			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
			atomic_set(&rdev->wr_log_idx, 0);
		}
	}

	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
	if (!rdev->free_workq) {
		err = -ENOMEM;
		goto err_free_status_page;
	}

	rdev->status_page->db_off = 0;

	return 0;
err_free_status_page:
	free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
	c4iw_ocqp_pool_destroy(rdev);
destroy_rqtpool:
	c4iw_rqtpool_destroy(rdev);
destroy_pblpool:
	c4iw_pblpool_destroy(rdev);
destroy_resource:
	c4iw_destroy_resource(&rdev->resource);
	return err;
}

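/*
 * Teardown mirrors c4iw_rdev_open(): destroy the free workqueue first
 * (flushing any deferred QP frees), then release the memory pools and
 * resource tables.
 */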
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	destroy_workqueue(rdev->free_workq);
	kfree(rdev->wr_log);
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
	idr_destroy(&ctx->dev->cqidr);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
	idr_destroy(&ctx->dev->qpidr);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
	idr_destroy(&ctx->dev->mmidr);
	wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	pr_debug("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}

static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		pr_info("%s: RDMA not supported on this device\n",
			pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		pr_err("Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	/* init various hw-queue params based on lld info */
	pr_debug("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
		 __func__, devp->rdev.lldi.sge_ingpadboundary,
		 devp->rdev.lldi.sge_egrstatuspagesize);

	devp->rdev.hw_queue.t4_eq_status_entries =
		devp->rdev.lldi.sge_egrstatuspagesize / 64;
	devp->rdev.hw_queue.t4_max_eq_size = 65520;
	devp->rdev.hw_queue.t4_max_iq_size = 65520;
	devp->rdev.hw_queue.t4_max_rq_size = 8192 -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_sq_size =
		devp->rdev.hw_queue.t4_max_eq_size -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_qp_depth =
		devp->rdev.hw_queue.t4_max_rq_size;
	devp->rdev.hw_queue.t4_max_cq_depth =
		devp->rdev.hw_queue.t4_max_iq_size - 2;
	devp->rdev.hw_queue.t4_stat_len =
		devp->rdev.lldi.sge_egrstatuspagesize;

	/*
	 * For T5/T6 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (!is_t4(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err("Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err("Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}

	pr_debug("ocq memory: hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
		 devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
		 devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		pr_err("Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);
	init_waitqueue_head(&devp->wait);
	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}

	return devp;
}

static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	pr_debug("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
		 __func__, pci_name(ctx->lldi.pdev),
		 ctx->lldi.nchan, ctx->lldi.nrxq,
		 ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		pr_debug("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *     rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			be64_to_cpu(*rsp),
			be64_to_cpu(*(__force __be64 *)gl->va),
			gl->tot_len);

		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}

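/*
 * ULD state machine: the ib_device is allocated and registered on the
 * first CXGB4_STATE_UP, and torn down on DOWN, DETACH or a fatal error
 * (after dispatching IB_EVENT_DEVICE_FATAL to consumers).
 */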
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	pr_debug("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				pr_err("%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				pr_err("%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		pr_info("%s: Down\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		pr_info("%s: Detach\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}

static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}

static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}

static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}

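/*
 * Doorbell-drop recovery snapshots every QP under the device lock,
 * holding a reference on each so none can be freed while their doorbell
 * state is resynchronized from a sleepable context.
 */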
struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}

static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));

		if (ret) {
			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}

static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		pr_err("%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
	if (!qp_list.qps) {
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state */
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done! deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	spin_unlock_irq(&ctx->dev->lock);
}

static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		pr_warn("%s: unknown control cmd %u\n",
			pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 511,
	.ciq = true,
	.lro = false,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);