/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

int c4iw_wr_log = 0;
module_param(c4iw_wr_log, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");

static int c4iw_wr_log_size_order = 12;
module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
		 "Number of entries (log2) in the work request timing log.");

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
static struct workqueue_struct *reg_workq;

#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0

static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
{
	struct wr_log_entry le;
	int idx;

	if (!wq->rdev->wr_log)
		return;

	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
		(wq->rdev->wr_log_size - 1);
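	/*
	 * wr_log_size is a power of two (1 << c4iw_wr_log_size_order), so
	 * the mask above reduces the ever-increasing wr_log_idx to a ring
	 * slot (with the default order of 12: idx = (n - 1) & 4095); old
	 * entries are simply overwritten.
	 */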
	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
	le.poll_host_time = ktime_get();
	le.valid = 1;
	le.cqe_sge_ts = CQE_TS(cqe);
	if (SQ_TYPE(cqe)) {
		le.qid = wq->sq.qid;
		le.opcode = CQE_OPCODE(cqe);
		le.post_host_time = wq->sq.sw_sq[wq->sq.cidx].host_time;
		le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
		le.wr_id = CQE_WRID_SQ_IDX(cqe);
	} else {
		le.qid = wq->rq.qid;
		le.opcode = FW_RI_RECEIVE;
		le.post_host_time = wq->rq.sw_rq[wq->rq.cidx].host_time;
		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
		le.wr_id = CQE_WRID_MSN(cqe);
	}
	wq->rdev->wr_log[idx] = le;
}

static int wr_log_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;
	ktime_t prev_time;
	struct wr_log_entry *lep;
	int prev_time_set = 0;
	int idx, end;

#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)

	idx = atomic_read(&dev->rdev.wr_log_idx) &
		(dev->rdev.wr_log_size - 1);
	end = idx - 1;
	if (end < 0)
		end = dev->rdev.wr_log_size - 1;
	lep = &dev->rdev.wr_log[idx];
	while (idx != end) {
		if (lep->valid) {
			if (!prev_time_set) {
				prev_time_set = 1;
				prev_time = lep->poll_host_time;
			}
			seq_printf(seq, "%04u: nsec %llu qid %u opcode "
				   "%u %s 0x%x host_wr_delta nsec %llu "
				   "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
				   "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
				   "cqe_poll_delta_ns %llu\n",
				   idx,
				   ktime_to_ns(ktime_sub(lep->poll_host_time,
							 prev_time)),
				   lep->qid, lep->opcode,
				   lep->opcode == FW_RI_RECEIVE ?
							"msn" : "wrid",
				   lep->wr_id,
				   ktime_to_ns(ktime_sub(lep->poll_host_time,
							 lep->post_host_time)),
				   lep->post_sge_ts, lep->cqe_sge_ts,
				   lep->poll_sge_ts,
				   ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
				   ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
			prev_time = lep->poll_host_time;
		}
		idx++;
		if (idx > (dev->rdev.wr_log_size - 1))
			idx = 0;
		lep = &dev->rdev.wr_log[idx];
	}
#undef ts2ns
	return 0;
}

static int wr_log_open(struct inode *inode, struct file *file)
{
	return single_open(file, wr_log_show, inode->i_private);
}

static ssize_t wr_log_clear(struct file *file, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
	int i;

	if (dev->rdev.wr_log)
		for (i = 0; i < dev->rdev.wr_log_size; i++)
			dev->rdev.wr_log[i].valid = 0;
	return count;
}

static const struct file_operations wr_log_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = wr_log_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = wr_log_clear,
};

static struct sockaddr_in zero_sin = {
	.sin_family = AF_INET,
};

static struct sockaddr_in6 zero_sin6 = {
	.sin6_family = AF_INET6,
};

static void set_ep_sin_addrs(struct c4iw_ep *ep,
			     struct sockaddr_in **lsin,
			     struct sockaddr_in **rsin,
			     struct sockaddr_in **m_lsin,
			     struct sockaddr_in **m_rsin)
{
	struct iw_cm_id *id = ep->com.cm_id;

	*m_lsin = (struct sockaddr_in *)&ep->com.local_addr;
	*m_rsin = (struct sockaddr_in *)&ep->com.remote_addr;
	if (id) {
		*lsin = (struct sockaddr_in *)&id->local_addr;
		*rsin = (struct sockaddr_in *)&id->remote_addr;
	} else {
		*lsin = &zero_sin;
		*rsin = &zero_sin;
	}
}
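
/*
 * IPv6 twin of set_ep_sin_addrs() above: the m_* pointers always refer to
 * the (possibly mapped) addresses stored in the endpoint itself, while
 * lsin6/rsin6 come from the attached cm_id when there is one and point at
 * the all-zero address otherwise.
 */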
static void set_ep_sin6_addrs(struct c4iw_ep *ep,
			      struct sockaddr_in6 **lsin6,
			      struct sockaddr_in6 **rsin6,
			      struct sockaddr_in6 **m_lsin6,
			      struct sockaddr_in6 **m_rsin6)
{
	struct iw_cm_id *id = ep->com.cm_id;

	*m_lsin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	*m_rsin6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
	if (id) {
		*lsin6 = (struct sockaddr_in6 *)&id->local_addr;
		*rsin6 = (struct sockaddr_in6 *)&id->remote_addr;
	} else {
		*lsin6 = &zero_sin6;
		*rsin6 = &zero_sin6;
	}
}

static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		struct c4iw_ep *ep = qp->ep;

		if (ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin;
			struct sockaddr_in *rsin;
			struct sockaddr_in *m_lsin;
			struct sockaddr_in *m_rsin;

			set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      ep->hwtid, (int)ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(m_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(m_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6;
			struct sockaddr_in6 *rsin6;
			struct sockaddr_in6 *m_lsin6;
			struct sockaddr_in6 *m_rsin6;

			set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6,
					  &m_rsin6);
			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      ep->hwtid, (int)ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(m_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(m_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		pr_info("%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int count = 1;

	qpd = kmalloc(sizeof(*qpd), GFP_KERNEL);
	if (!qpd)
		return -ENOMEM;

	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 180;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		kfree(qpd);
		return -ENOMEM;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	return 0;
}
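
/*
 * The "qps", "stags" and "eps" debugfs files share one pattern: open()
 * sizes a buffer from the current idr population (e.g. 180 bytes per qp
 * here), snapshots every object into it under the device lock, and read()
 * then serves the prebuilt buffer via debugfs_read() and
 * simple_read_from_buffer().
 */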
static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;
	struct fw_ri_tpte tpte;
	int ret;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id << 8,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&stagd->devp->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return ret;
	}
	cc = snprintf(stagd->buf + stagd->pos, space,
		      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
		      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		      (u32)id << 8,
		      FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		      FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		pr_info("%s null stagd?\n", __func__);
		return 0;
	}
	vfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof(*stagd), GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * 256;
	stagd->buf = vmalloc(stagd->bufsize);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};

static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, " Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
		   dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
		   dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
		   dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
		   dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
		   dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
		   dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};

static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin;
		struct sockaddr_in *rsin;
		struct sockaddr_in *m_lsin;
		struct sockaddr_in *m_rsin;

		set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
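		/*
		 * Each endpoint address prints as ip:port/mapped_port, i.e.
		 * the cm_id view followed by the port of the (possibly
		 * translated) m_* address kept in the endpoint.
		 */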
		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(m_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(m_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6;
		struct sockaddr_in6 *rsin6;
		struct sockaddr_in6 *m_lsin6;
		struct sockaddr_in6 *m_rsin6;

		set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6, &m_rsin6);
		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(m_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(m_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in *m_lsin = (struct sockaddr_in *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(m_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in6 *m_lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(m_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;

	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 240;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}
static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
};

static int setup_debugfs(struct c4iw_dev *devp)
{
	if (!devp->debugfs_root)
		return -1;

	debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops, 4096);

	debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops, 4096);

	debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops, 4096);

	debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops, 4096);

	if (c4iw_wr_log)
		debugfs_create_file_size("wr_log", S_IWUSR,
					 devp->debugfs_root, (void *)devp,
					 &wr_log_debugfs_fops, 4096);
	return 0;
}

void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * This implementation assumes udb_density == ucq_density! Eventually
	 * we might need to support this but for now fail the open. Also the
	 * cqid and qpid range must match for now.
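	 *
	 * Note that with equal densities, qpmask (udb_density - 1) and
	 * cqmask (ucq_density - 1), computed just below, are the same mask.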
	 */
	if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
		pr_err("%s: unsupported udb/ucq densities %u/%u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
		       rdev->lldi.ucq_density);
		return -EINVAL;
	}
	if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
	    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
		pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
		       rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
		       rdev->lldi.vr->cq.size);
		return -EINVAL;
	}

	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqmask = rdev->lldi.ucq_density - 1;
	pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
		 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
		 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
		 rdev->lldi.vr->pbl.start,
		 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
		 rdev->lldi.vr->rq.size,
		 rdev->lldi.vr->qp.start,
		 rdev->lldi.vr->qp.size,
		 rdev->lldi.vr->cq.start,
		 rdev->lldi.vr->cq.size);
	pr_debug("udb %pR db_reg %p gts_reg %p qpmask 0x%x cqmask 0x%x\n",
		 &rdev->lldi.pdev->resource[2],
		 rdev->lldi.db_reg, rdev->lldi.gts_reg,
		 rdev->qpmask, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0)
		return -EINVAL;

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		pr_err("error %d initializing resources\n", err);
		return err;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		pr_err("error %d initializing pbl pool\n", err);
		goto destroy_resource;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		pr_err("error %d initializing rqt pool\n", err);
		goto destroy_pblpool;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		pr_err("error %d initializing ocqp pool\n", err);
		goto destroy_rqtpool;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		err = -ENOMEM;
		goto destroy_ocqp_pool;
	}
	rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
	rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
	rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
	rdev->status_page->cq_size = rdev->lldi.vr->cq.size;

	if (c4iw_wr_log) {
		rdev->wr_log = kcalloc(1 << c4iw_wr_log_size_order,
				       sizeof(*rdev->wr_log),
				       GFP_KERNEL);
		if (rdev->wr_log) {
			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
			atomic_set(&rdev->wr_log_idx, 0);
		}
	}

	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
	if (!rdev->free_workq) {
		err = -ENOMEM;
		goto err_free_status_page_and_wr_log;
	}

	rdev->status_page->db_off = 0;

	init_completion(&rdev->rqt_compl);
	init_completion(&rdev->pbl_compl);
	kref_init(&rdev->rqt_kref);
	kref_init(&rdev->pbl_kref);

	return 0;
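/*
 * Error unwind: each label below releases exactly what was set up before
 * the failing step, in reverse order of construction.
 */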
err_free_status_page_and_wr_log:
	if (c4iw_wr_log && rdev->wr_log)
		kfree(rdev->wr_log);
	free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
	c4iw_ocqp_pool_destroy(rdev);
destroy_rqtpool:
	c4iw_rqtpool_destroy(rdev);
destroy_pblpool:
	c4iw_pblpool_destroy(rdev);
destroy_resource:
	c4iw_destroy_resource(&rdev->resource);
	return err;
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	kfree(rdev->wr_log);
	c4iw_release_dev_ucontext(rdev, &rdev->uctx);
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	wait_for_completion(&rdev->pbl_compl);
	wait_for_completion(&rdev->rqt_compl);
	c4iw_ocqp_pool_destroy(rdev);
	destroy_workqueue(rdev->free_workq);
	c4iw_destroy_resource(&rdev->resource);
}

void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
	idr_destroy(&ctx->dev->cqidr);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
	idr_destroy(&ctx->dev->qpidr);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
	idr_destroy(&ctx->dev->mmidr);
	wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	pr_debug("c4iw_dev %p\n", ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}

static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		pr_info("%s: RDMA not supported on this device\n",
			pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		pr_err("Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	/* init various hw-queue params based on lld info */
	pr_debug("Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
		 devp->rdev.lldi.sge_ingpadboundary,
		 devp->rdev.lldi.sge_egrstatuspagesize);

	devp->rdev.hw_queue.t4_eq_status_entries =
		devp->rdev.lldi.sge_egrstatuspagesize / 64;
	devp->rdev.hw_queue.t4_max_eq_size = 65520;
	devp->rdev.hw_queue.t4_max_iq_size = 65520;
	devp->rdev.hw_queue.t4_max_rq_size = 8192 -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_sq_size =
		devp->rdev.hw_queue.t4_max_eq_size -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_qp_depth =
		devp->rdev.hw_queue.t4_max_rq_size;
	devp->rdev.hw_queue.t4_max_cq_depth =
		devp->rdev.hw_queue.t4_max_iq_size - 2;
	devp->rdev.hw_queue.t4_stat_len =
		devp->rdev.lldi.sge_egrstatuspagesize;

	/*
	 * For T5/T6 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
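	 *
	 * ioremap_wc() requests a write-combining mapping; the intent is to
	 * let back-to-back doorbell/WQE writes merge into larger PCIe
	 * transactions.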
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (!is_t4(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err("Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err("Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}

	pr_debug("ocq memory: hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
		 devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
		 devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		pr_err("Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);
	init_waitqueue_head(&devp->wait);
	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}

	return devp;
}

static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	pr_debug("found device %s nchan %u nrxq %u ntxq %u nports %u\n",
		 pci_name(ctx->lldi.pdev),
		 ctx->lldi.nchan, ctx->lldi.nrxq,
		 ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		pr_debug("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
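	 *
	 * That size assumption is what makes the headroom reserved below
	 * large enough for the synthesized cpl_pass_accept_req to be written
	 * over the received cpl_rx_pkt in place.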
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;
		u32 qid = be32_to_cpu(rc->pldbuflen_qid);

		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			be64_to_cpu(*rsp),
			be64_to_cpu(*(__force __be64 *)gl->va),
			gl->tot_len);

		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}

static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	pr_debug("new_state %u\n", new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				pr_err("%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}

			INIT_WORK(&ctx->reg_work, c4iw_register_device);
			queue_work(reg_workq, &ctx->reg_work);
		}
		break;
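	/*
	 * ib device registration above is deferred to reg_workq so this
	 * ULD callback returns promptly; the teardown cases below call
	 * c4iw_remove() synchronously.
	 */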
	case CXGB4_STATE_DOWN:
		pr_info("%s: Down\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_FATAL_ERROR:
	case CXGB4_STATE_START_RECOVERY:
		pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof(event));
			event.event = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		pr_info("%s: Detach\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}

static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}

static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}

static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}

struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};
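
/*
 * idr_for_each() callback used by recover_queues(): take a reference on
 * each qp and stash it in the caller's qp_list so the list can be walked
 * safely after the device lock is dropped.
 */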
static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}

static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));
		if (ret) {
			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}

static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		pr_err("%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kcalloc(count, sizeof(*qp_list.qps), GFP_ATOMIC);
	if (!qp_list.qps) {
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state */
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done! deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	spin_unlock_irq(&ctx->dev->lock);
}
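
/*
 * Doorbell flow-control entry point from the LLD: DB_FULL stops all user
 * doorbells, DB_EMPTY resumes them in DB_FC_RESUME_SIZE chunks, and
 * DB_DROP runs the full lost-doorbell recovery sequence above.
 */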
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		pr_warn("%s: unknown control cmd %u\n",
			pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 511,
	.ciq = true,
	.lro = false,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

void _c4iw_free_wr_wait(struct kref *kref)
{
	struct c4iw_wr_wait *wr_waitp;

	wr_waitp = container_of(kref, struct c4iw_wr_wait, kref);
	pr_debug("Free wr_wait %p\n", wr_waitp);
	kfree(wr_waitp);
}

struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp)
{
	struct c4iw_wr_wait *wr_waitp;

	wr_waitp = kzalloc(sizeof(*wr_waitp), gfp);
	if (wr_waitp) {
		kref_init(&wr_waitp->kref);
		pr_debug("wr_wait %p\n", wr_waitp);
	}
	return wr_waitp;
}

static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	reg_workq = create_singlethread_workqueue("Register_iWARP_device");
	if (!reg_workq) {
		pr_err("Failed creating workqueue to register iwarp device\n");
		return -ENOMEM;
	}

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	flush_workqueue(reg_workq);
	destroy_workqueue(reg_workq);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);