// SPDX-License-Identifier: GPL-2.0+
/*
 * Mellanox BlueField SoC TmFifo driver
 *
 * Copyright (C) 2019 Mellanox Technologies
 */

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/efi.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include <linux/virtio_config.h>
#include <linux/virtio_console.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>

#include "mlxbf-tmfifo-regs.h"

/* Vring size. */
#define MLXBF_TMFIFO_VRING_SIZE			SZ_1K

/* Console Tx buffer size. */
#define MLXBF_TMFIFO_CON_TX_BUF_SIZE		SZ_32K

/* Console Tx buffer reserved space. */
#define MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE	8

/* House-keeping timer interval. */
#define MLXBF_TMFIFO_TIMER_INTERVAL		(HZ / 10)

/* Virtual devices sharing the TM FIFO. */
#define MLXBF_TMFIFO_VDEV_MAX			(VIRTIO_ID_CONSOLE + 1)

/*
 * Reserve 1/16 of TmFifo space, so console messages are not starved by
 * the networking traffic.
 */
#define MLXBF_TMFIFO_RESERVE_RATIO		16

/* Message with data needs at least two words (for header & data). */
#define MLXBF_TMFIFO_DATA_MIN_WORDS		2

/* Tx timeout in milliseconds. */
#define TMFIFO_TX_TIMEOUT			2000

/* ACPI UID for BlueField-3. */
#define TMFIFO_BF3_UID				1

struct mlxbf_tmfifo;

/**
 * struct mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring
 * @va: virtual address of the ring
 * @dma: dma address of the ring
 * @vq: pointer to the virtio virtqueue
 * @desc: current descriptor of the pending packet
 * @desc_head: head descriptor of the pending packet
 * @drop_desc: dummy desc for packet dropping
 * @cur_len: processed length of the current descriptor
 * @rem_len: remaining length of the pending packet
 * @rem_padding: remaining bytes to send as paddings
 * @pkt_len: total length of the pending packet
 * @next_avail: next avail descriptor id
 * @num: vring size (number of descriptors)
 * @align: vring alignment size
 * @index: vring index
 * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
 * @tx_timeout: expire time of last tx packet
 * @fifo: pointer to the tmfifo structure
 */
struct mlxbf_tmfifo_vring {
	void *va;
	dma_addr_t dma;
	struct virtqueue *vq;
	struct vring_desc *desc;
	struct vring_desc *desc_head;
	struct vring_desc drop_desc;
	int cur_len;
	int rem_len;
	int rem_padding;
	u32 pkt_len;
	u16 next_avail;
	int num;
	int align;
	int index;
	int vdev_id;
	unsigned long tx_timeout;
	struct mlxbf_tmfifo *fifo;
};

/* Check whether vring is in drop mode. */
#define IS_VRING_DROP(_r) ({ \
	typeof(_r) (r) = (_r); \
	r->desc_head == &r->drop_desc; })

/* A stub length to drop maximum length packet. */
#define VRING_DROP_DESC_MAX_LEN		GENMASK(15, 0)

/* Interrupt types. */
enum {
	MLXBF_TM_RX_LWM_IRQ,
	MLXBF_TM_RX_HWM_IRQ,
	MLXBF_TM_TX_LWM_IRQ,
	MLXBF_TM_TX_HWM_IRQ,
	MLXBF_TM_MAX_IRQ
};

/* Ring types (Rx & Tx). */
enum {
	MLXBF_TMFIFO_VRING_RX,
	MLXBF_TMFIFO_VRING_TX,
	MLXBF_TMFIFO_VRING_MAX
};

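/*
 * Note: these ring-type values double as each vdev's vring array indices,
 * so an even vring->index always denotes an Rx ring and an odd one a Tx
 * ring, matching the parity check in mlxbf_tmfifo_virtio_notify() below.
 */
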
/**
 * struct mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device
 * @vdev: virtio device, in which the vdev.id.device field carries the
 *        VIRTIO_ID_xxx id that identifies the virtual device
 * @status: status of the device
 * @features: supported features of the device
 * @vrings: array of tmfifo vrings of this device
 * @config: non-anonymous union for cons and net
 * @config.cons: virtual console config -
 *               selected when vdev.id.device is VIRTIO_ID_CONSOLE
 * @config.net: virtual network config -
 *              selected when vdev.id.device is VIRTIO_ID_NET
 * @tx_buf: tx buffer used to buffer data before writing into the FIFO
 */
struct mlxbf_tmfifo_vdev {
	struct virtio_device vdev;
	u8 status;
	u64 features;
	struct mlxbf_tmfifo_vring vrings[MLXBF_TMFIFO_VRING_MAX];
	union {
		struct virtio_console_config cons;
		struct virtio_net_config net;
	} config;
	struct circ_buf tx_buf;
};

/**
 * struct mlxbf_tmfifo_irq_info - Structure of the interrupt information
 * @fifo: pointer to the tmfifo structure
 * @irq: interrupt number
 * @index: index into the interrupt array
 */
struct mlxbf_tmfifo_irq_info {
	struct mlxbf_tmfifo *fifo;
	int irq;
	int index;
};

/**
 * struct mlxbf_tmfifo_io - Structure of the TmFifo IO resource (for both rx & tx)
 * @ctl: control register offset (TMFIFO_RX_CTL / TMFIFO_TX_CTL)
 * @sts: status register offset (TMFIFO_RX_STS / TMFIFO_TX_STS)
 * @data: data register offset (TMFIFO_RX_DATA / TMFIFO_TX_DATA)
 */
struct mlxbf_tmfifo_io {
	void __iomem *ctl;
	void __iomem *sts;
	void __iomem *data;
};

/**
 * struct mlxbf_tmfifo - Structure of the TmFifo
 * @vdev: array of the virtual devices running over the TmFifo
 * @lock: lock to protect the TmFifo access
 * @res0: mapped resource block 0
 * @res1: mapped resource block 1
 * @rx: rx io resource
 * @tx: tx io resource
 * @rx_fifo_size: number of entries of the Rx FIFO
 * @tx_fifo_size: number of entries of the Tx FIFO
 * @pend_events: pending bits for deferred events
 * @irq_info: interrupt information
 * @work: work struct for deferred process
 * @timer: background timer
 * @vring: Tx/Rx ring
 * @spin_lock: Tx/Rx spin lock
 * @is_ready: ready flag
 */
struct mlxbf_tmfifo {
	struct mlxbf_tmfifo_vdev *vdev[MLXBF_TMFIFO_VDEV_MAX];
	struct mutex lock;		/* TmFifo lock */
	void __iomem *res0;
	void __iomem *res1;
	struct mlxbf_tmfifo_io rx;
	struct mlxbf_tmfifo_io tx;
	int rx_fifo_size;
	int tx_fifo_size;
	unsigned long pend_events;
	struct mlxbf_tmfifo_irq_info irq_info[MLXBF_TM_MAX_IRQ];
	struct work_struct work;
	struct timer_list timer;
	struct mlxbf_tmfifo_vring *vring[2];
	spinlock_t spin_lock[2];	/* spin lock */
	bool is_ready;
};

/**
 * struct mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header
 * @type: message type
 * @len: payload length in network byte order. Messages sent into the FIFO
 *       are read by the other side as a raw byte stream in the same order,
 *       so the length is encoded in network order for both sides to
 *       interpret it consistently.
 */
struct mlxbf_tmfifo_msg_hdr {
	u8 type;
	__be16 len;
	/* private: */
	u8 unused[5];
} __packed __aligned(sizeof(u64));

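/*
 * Framing example (illustrative): a console message carrying 5 payload
 * bytes occupies two 64-bit FIFO words -- one header word with
 * type = VIRTIO_ID_CONSOLE and len = htons(5), followed by one data word
 * whose trailing 3 bytes are unused. This is why a message with data
 * needs at least MLXBF_TMFIFO_DATA_MIN_WORDS words.
 */
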
/*
 * Default MAC.
 * This MAC address will be read from the EFI persistent variable if
 * configured. It can also be reconfigured with standard Linux tools.
 */
static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
	0x00, 0x1A, 0xCA, 0xFF, 0xFF, 0x01
};

/* EFI variable name of the MAC address. */
static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";

/* Maximum L2 header length. */
#define MLXBF_TMFIFO_NET_L2_OVERHEAD	(ETH_HLEN + VLAN_HLEN)

/* Supported virtio-net features. */
#define MLXBF_TMFIFO_NET_FEATURES \
	(BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_STATUS) | \
	 BIT_ULL(VIRTIO_NET_F_MAC))

#define mlxbf_vdev_to_tmfifo(d) container_of(d, struct mlxbf_tmfifo_vdev, vdev)

/* Free vrings of the FIFO device. */
static void mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo *fifo,
				     struct mlxbf_tmfifo_vdev *tm_vdev)
{
	struct mlxbf_tmfifo_vring *vring;
	int i, size;

	for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
		vring = &tm_vdev->vrings[i];
		if (vring->va) {
			size = vring_size(vring->num, vring->align);
			dma_free_coherent(tm_vdev->vdev.dev.parent, size,
					  vring->va, vring->dma);
			vring->va = NULL;
			if (vring->vq) {
				vring_del_virtqueue(vring->vq);
				vring->vq = NULL;
			}
		}
	}
}

/* Allocate vrings for the FIFO. */
static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
				     struct mlxbf_tmfifo_vdev *tm_vdev)
{
	struct mlxbf_tmfifo_vring *vring;
	struct device *dev;
	dma_addr_t dma;
	int i, size;
	void *va;

	for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
		vring = &tm_vdev->vrings[i];
		vring->fifo = fifo;
		vring->num = MLXBF_TMFIFO_VRING_SIZE;
		vring->align = SMP_CACHE_BYTES;
		vring->index = i;
		vring->vdev_id = tm_vdev->vdev.id.device;
		vring->drop_desc.len = cpu_to_virtio32(&tm_vdev->vdev,
						       VRING_DROP_DESC_MAX_LEN);
		dev = &tm_vdev->vdev.dev;

		size = vring_size(vring->num, vring->align);
		va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
		if (!va) {
			mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
			dev_err(dev->parent, "dma_alloc_coherent failed\n");
			return -ENOMEM;
		}

		vring->va = va;
		vring->dma = dma;
	}

	return 0;
}

/* Disable interrupts of the FIFO device. */
static void mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo *fifo)
{
	int i, irq;

	for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
		irq = fifo->irq_info[i].irq;
		fifo->irq_info[i].irq = 0;
		disable_irq(irq);
	}
}

/* Interrupt handler. */
static irqreturn_t mlxbf_tmfifo_irq_handler(int irq, void *arg)
{
	struct mlxbf_tmfifo_irq_info *irq_info = arg;

	if (!test_and_set_bit(irq_info->index, &irq_info->fifo->pend_events))
		schedule_work(&irq_info->fifo->work);

	return IRQ_HANDLED;
}

/* Get the next packet descriptor from the vring. */
static struct vring_desc *
mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct virtio_device *vdev = vring->vq->vdev;
	unsigned int idx, head;

	if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
		return NULL;

	/* Make sure 'avail->idx' is visible already. */
	virtio_rmb(false);

	idx = vring->next_avail % vr->num;
	head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
	if (WARN_ON(head >= vr->num))
		return NULL;

	vring->next_avail++;

	return &vr->desc[head];
}

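/*
 * Note: this driver implements the device side of the split virtqueue:
 * it consumes buffers from the avail ring at 'next_avail' above and
 * returns them through the used ring in mlxbf_tmfifo_release_desc()
 * below, which is why it updates 'used->idx' rather than 'avail->idx'.
 */
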
/* Release virtio descriptor. */
static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
				      struct vring_desc *desc, u32 len)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct virtio_device *vdev = vring->vq->vdev;
	u16 idx, vr_idx;

	vr_idx = virtio16_to_cpu(vdev, vr->used->idx);
	idx = vr_idx % vr->num;
	vr->used->ring[idx].id = cpu_to_virtio32(vdev, desc - vr->desc);
	vr->used->ring[idx].len = cpu_to_virtio32(vdev, len);

	/*
	 * Virtio could poll and check the 'idx' to decide whether the desc is
	 * done or not. Add a memory barrier here to make sure the update above
	 * completes before updating the idx.
	 */
	virtio_mb(false);
	vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
}

/* Get the total length of the descriptor chain. */
static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
				    struct vring_desc *desc)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct virtio_device *vdev = vring->vq->vdev;
	u32 len = 0, idx;

	while (desc) {
		len += virtio32_to_cpu(vdev, desc->len);
		if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
			break;
		idx = virtio16_to_cpu(vdev, desc->next);
		desc = &vr->desc[idx];
	}

	return len;
}

static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
{
	struct vring_desc *desc_head;
	u32 len = 0;

	if (vring->desc_head) {
		desc_head = vring->desc_head;
		len = vring->pkt_len;
	} else {
		desc_head = mlxbf_tmfifo_get_next_desc(vring);
		len = mlxbf_tmfifo_get_pkt_len(vring, desc_head);
	}

	if (desc_head)
		mlxbf_tmfifo_release_desc(vring, desc_head, len);

	vring->pkt_len = 0;
	vring->desc = NULL;
	vring->desc_head = NULL;
}

static void mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring *vring,
				       struct vring_desc *desc, bool is_rx)
{
	struct virtio_device *vdev = vring->vq->vdev;
	struct virtio_net_hdr *net_hdr;

	net_hdr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
	memset(net_hdr, 0, sizeof(*net_hdr));
}

/* Get and initialize the next packet. */
static struct vring_desc *
mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring *vring, bool is_rx)
{
	struct vring_desc *desc;

	desc = mlxbf_tmfifo_get_next_desc(vring);
	if (desc && is_rx && vring->vdev_id == VIRTIO_ID_NET)
		mlxbf_tmfifo_init_net_desc(vring, desc, is_rx);

	vring->desc_head = desc;
	vring->desc = desc;

	return desc;
}

/* House-keeping timer. */
static void mlxbf_tmfifo_timer(struct timer_list *t)
{
	struct mlxbf_tmfifo *fifo = container_of(t, struct mlxbf_tmfifo, timer);
	int rx, tx;

	rx = !test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events);
	tx = !test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);

	if (rx || tx)
		schedule_work(&fifo->work);

	mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
}

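/*
 * The timer serves as a polling fallback: it periodically sets the Rx/Tx
 * event bits and kicks the worker so that the queues keep making progress
 * (e.g. Tx-timeout padding, or newly drained FIFO space) even when no
 * watermark interrupt fires in between.
 */
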
/* Copy one console packet into the output buffer. */
static void mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev *cons,
					    struct mlxbf_tmfifo_vring *vring,
					    struct vring_desc *desc)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct virtio_device *vdev = &cons->vdev;
	u32 len, idx, seg;
	void *addr;

	while (desc) {
		addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
		len = virtio32_to_cpu(vdev, desc->len);

		seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
					MLXBF_TMFIFO_CON_TX_BUF_SIZE);
		if (len <= seg) {
			memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, len);
		} else {
			memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg);
			addr += seg;
			memcpy(cons->tx_buf.buf, addr, len - seg);
		}
		cons->tx_buf.head = (cons->tx_buf.head + len) %
			MLXBF_TMFIFO_CON_TX_BUF_SIZE;

		if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
			break;
		idx = virtio16_to_cpu(vdev, desc->next);
		desc = &vr->desc[idx];
	}
}

/* Copy console data into the output buffer. */
static void mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev *cons,
					struct mlxbf_tmfifo_vring *vring)
{
	struct vring_desc *desc;
	u32 len, avail;

	desc = mlxbf_tmfifo_get_next_desc(vring);
	while (desc) {
		/* Release the packet if not enough space. */
		len = mlxbf_tmfifo_get_pkt_len(vring, desc);
		avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail,
				   MLXBF_TMFIFO_CON_TX_BUF_SIZE);
		if (len + MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE > avail) {
			mlxbf_tmfifo_release_desc(vring, desc, len);
			break;
		}

		mlxbf_tmfifo_console_output_one(cons, vring, desc);
		mlxbf_tmfifo_release_desc(vring, desc, len);
		desc = mlxbf_tmfifo_get_next_desc(vring);
	}
}

/* Get the number of available words in Rx FIFO for receiving. */
static int mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo *fifo)
{
	u64 sts;

	sts = readq(fifo->rx.sts);
	return FIELD_GET(MLXBF_TMFIFO_RX_STS__COUNT_MASK, sts);
}

/* Get the number of available words in the TmFifo for sending. */
static int mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo *fifo, int vdev_id)
{
	int tx_reserve;
	u32 count;
	u64 sts;

	/* Reserve some room in FIFO for console messages. */
	if (vdev_id == VIRTIO_ID_NET)
		tx_reserve = fifo->tx_fifo_size / MLXBF_TMFIFO_RESERVE_RATIO;
	else
		tx_reserve = 1;

	sts = readq(fifo->tx.sts);
	count = FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK, sts);
	return fifo->tx_fifo_size - tx_reserve - count;
}

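/*
 * Worked example (assuming an illustrative 256-entry Tx FIFO): network
 * traffic sees 256 - 256/MLXBF_TMFIFO_RESERVE_RATIO - count = 240 - count
 * available words, while the console reserves only a single word, so
 * console output cannot be completely starved by networking.
 */
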
/* Console Tx (move data from the output buffer into the TmFifo). */
static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
{
	struct mlxbf_tmfifo_msg_hdr hdr;
	struct mlxbf_tmfifo_vdev *cons;
	unsigned long flags;
	int size, seg;
	void *addr;
	u64 data;

	/* Return if not enough space available. */
	if (avail < MLXBF_TMFIFO_DATA_MIN_WORDS)
		return;

	cons = fifo->vdev[VIRTIO_ID_CONSOLE];
	if (!cons || !cons->tx_buf.buf)
		return;

	/* Return if no data to send. */
	size = CIRC_CNT(cons->tx_buf.head, cons->tx_buf.tail,
			MLXBF_TMFIFO_CON_TX_BUF_SIZE);
	if (size == 0)
		return;

	/* Adjust the size to available space. */
	if (size + sizeof(hdr) > avail * sizeof(u64))
		size = avail * sizeof(u64) - sizeof(hdr);

	/* Write header. */
	hdr.type = VIRTIO_ID_CONSOLE;
	hdr.len = htons(size);
	writeq(*(u64 *)&hdr, fifo->tx.data);

	/* Use spin-lock to protect the 'cons->tx_buf'. */
	spin_lock_irqsave(&fifo->spin_lock[0], flags);

	while (size > 0) {
		addr = cons->tx_buf.buf + cons->tx_buf.tail;

		seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
				      MLXBF_TMFIFO_CON_TX_BUF_SIZE);
		if (seg >= sizeof(u64)) {
			memcpy(&data, addr, sizeof(u64));
		} else {
			memcpy(&data, addr, seg);
			memcpy((u8 *)&data + seg, cons->tx_buf.buf,
			       sizeof(u64) - seg);
		}
		writeq(data, fifo->tx.data);

		if (size >= sizeof(u64)) {
			cons->tx_buf.tail = (cons->tx_buf.tail + sizeof(u64)) %
				MLXBF_TMFIFO_CON_TX_BUF_SIZE;
			size -= sizeof(u64);
		} else {
			cons->tx_buf.tail = (cons->tx_buf.tail + size) %
				MLXBF_TMFIFO_CON_TX_BUF_SIZE;
			size = 0;
		}
	}

	spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
}

/* Rx/Tx one word in the descriptor buffer. */
static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
				   struct vring_desc *desc,
				   bool is_rx, int len)
{
	struct virtio_device *vdev = vring->vq->vdev;
	struct mlxbf_tmfifo *fifo = vring->fifo;
	void *addr;
	u64 data;

	/* Get the buffer address of this desc. */
	addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));

	/* Read a word from FIFO for Rx. */
	if (is_rx)
		data = readq(fifo->rx.data);

	if (vring->cur_len + sizeof(u64) <= len) {
		/* The whole word. */
		if (is_rx) {
			if (!IS_VRING_DROP(vring))
				memcpy(addr + vring->cur_len, &data,
				       sizeof(u64));
		} else {
			memcpy(&data, addr + vring->cur_len,
			       sizeof(u64));
		}
		vring->cur_len += sizeof(u64);
	} else {
		/* Leftover bytes. */
		if (is_rx) {
			if (!IS_VRING_DROP(vring))
				memcpy(addr + vring->cur_len, &data,
				       len - vring->cur_len);
		} else {
			data = 0;
			memcpy(&data, addr + vring->cur_len,
			       len - vring->cur_len);
		}
		vring->cur_len = len;
	}

	/* Write the word into FIFO for Tx. */
	if (!is_rx)
		writeq(data, fifo->tx.data);
}

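/*
 * The FIFO is only accessed in whole 64-bit words, so the final word of a
 * packet may extend past 'len': on Tx the leftover bytes are zero-filled
 * before the word is written, and on Rx the extra bytes of the final word
 * are simply not copied into the descriptor buffer.
 */
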
/*
 * Rx/Tx packet header.
 *
 * In Rx case, the packet might be found to belong to a different vring since
 * the TmFifo is shared by different services. In such case, the 'vring_change'
 * flag is set.
 */
static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
				     struct vring_desc **desc,
				     bool is_rx, bool *vring_change)
{
	struct mlxbf_tmfifo *fifo = vring->fifo;
	struct virtio_net_config *config;
	struct mlxbf_tmfifo_msg_hdr hdr;
	int vdev_id, hdr_len;
	bool drop_rx = false;

	/* Read/Write packet header. */
	if (is_rx) {
		/* Drain one word from the FIFO. */
		*(u64 *)&hdr = readq(fifo->rx.data);

		/* Skip the length 0 packets (keepalive). */
		if (hdr.len == 0)
			return;

		/* Check packet type. */
		if (hdr.type == VIRTIO_ID_NET) {
			vdev_id = VIRTIO_ID_NET;
			hdr_len = sizeof(struct virtio_net_hdr);
			config = &fifo->vdev[vdev_id]->config.net;
			/* A legacy-only interface for now. */
			if (ntohs(hdr.len) >
			    __virtio16_to_cpu(virtio_legacy_is_little_endian(),
					      config->mtu) +
			    MLXBF_TMFIFO_NET_L2_OVERHEAD)
				drop_rx = true;
		} else {
			vdev_id = VIRTIO_ID_CONSOLE;
			hdr_len = 0;
		}

		/*
		 * Check whether the new packet still belongs to this vring.
		 * If not, update the pkt_len of the new vring.
		 */
		if (vdev_id != vring->vdev_id) {
			struct mlxbf_tmfifo_vdev *tm_dev2 = fifo->vdev[vdev_id];

			if (!tm_dev2)
				return;
			vring->desc = *desc;
			vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
			*vring_change = true;
		}

		if (drop_rx && !IS_VRING_DROP(vring)) {
			if (vring->desc_head)
				mlxbf_tmfifo_release_pkt(vring);
			*desc = &vring->drop_desc;
			vring->desc_head = *desc;
			vring->desc = *desc;
		}

		vring->pkt_len = ntohs(hdr.len) + hdr_len;
	} else {
		/* Network virtio has an extra header. */
		hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
			   sizeof(struct virtio_net_hdr) : 0;
		vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
		hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
			    VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
		hdr.len = htons(vring->pkt_len - hdr_len);
		writeq(*(u64 *)&hdr, fifo->tx.data);
	}

	vring->cur_len = hdr_len;
	vring->rem_len = vring->pkt_len;
	fifo->vring[is_rx] = vring;
}

/*
 * Rx/Tx one descriptor.
 *
 * Return true to indicate more data available.
 */
static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
				       bool is_rx, int *avail)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct mlxbf_tmfifo *fifo = vring->fifo;
	struct virtio_device *vdev;
	bool vring_change = false;
	struct vring_desc *desc;
	unsigned long flags;
	u32 len, idx;

	vdev = &fifo->vdev[vring->vdev_id]->vdev;

	/* Get the descriptor of the next packet. */
	if (!vring->desc) {
		desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
		if (!desc) {
			/* Drop the next Rx packet to avoid getting stuck. */
			if (is_rx) {
				desc = &vring->drop_desc;
				vring->desc_head = desc;
				vring->desc = desc;
			} else {
				return false;
			}
		}
	} else {
		desc = vring->desc;
	}

	/* Beginning of a packet. Start to Rx/Tx packet header. */
	if (vring->pkt_len == 0) {
		mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
		(*avail)--;

		/* Return if new packet is for another ring. */
		if (vring_change)
			return false;
		goto mlxbf_tmfifo_desc_done;
	}

	/* Get the length of this desc. */
	len = virtio32_to_cpu(vdev, desc->len);
	if (len > vring->rem_len)
		len = vring->rem_len;

	/* Rx/Tx one word (8 bytes) if not done. */
	if (vring->cur_len < len) {
		mlxbf_tmfifo_rxtx_word(vring, desc, is_rx, len);
		(*avail)--;
	}

	/* Check again whether it's done. */
	if (vring->cur_len == len) {
		vring->cur_len = 0;
		vring->rem_len -= len;

		/* Get the next desc on the chain. */
		if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
		    (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
			idx = virtio16_to_cpu(vdev, desc->next);
			desc = &vr->desc[idx];
			goto mlxbf_tmfifo_desc_done;
		}

		/* Done and release the packet. */
		desc = NULL;
		fifo->vring[is_rx] = NULL;
		if (!IS_VRING_DROP(vring)) {
			mlxbf_tmfifo_release_pkt(vring);
		} else {
			vring->pkt_len = 0;
			vring->desc_head = NULL;
			vring->desc = NULL;
			return false;
		}

		/*
		 * Make sure the load/store are in order before
		 * returning back to virtio.
		 */
		virtio_mb(false);

		/* Notify upper layer that packet is done. */
		spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
		vring_interrupt(0, vring->vq);
		spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
	}

mlxbf_tmfifo_desc_done:
	/* Save the current desc. */
	vring->desc = desc;

	return true;
}

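/*
 * Note on the 'vring_change' hand-off above: when an Rx header announces
 * a packet for the other service, mlxbf_tmfifo_rxtx_header() repoints
 * fifo->vring[is_rx] to that service's Rx vring and this function returns
 * false; the caller's loop stops and a later pass of the worker continues
 * the packet body on the new vring.
 */
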
static void mlxbf_tmfifo_check_tx_timeout(struct mlxbf_tmfifo_vring *vring)
{
	unsigned long flags;

	/* Only handle Tx timeout for network vdev. */
	if (vring->vdev_id != VIRTIO_ID_NET)
		return;

	/* Initialize the timeout or return if not expired. */
	if (!vring->tx_timeout) {
		/* Initialize the timeout. */
		vring->tx_timeout = jiffies +
			msecs_to_jiffies(TMFIFO_TX_TIMEOUT);
		return;
	} else if (time_before(jiffies, vring->tx_timeout)) {
		/* Return if not timeout yet. */
		return;
	}

	/*
	 * Drop the packet after timeout. The outstanding packet is
	 * released, and the remaining bytes are sent as 0x00 padding bytes
	 * for recovery. On the peer (host) side, the padding bytes are
	 * either dropped directly or appended to an existing outstanding
	 * packet, which is then dropped as a corrupted network packet.
	 */
	vring->rem_padding = round_up(vring->rem_len, sizeof(u64));
	mlxbf_tmfifo_release_pkt(vring);
	vring->cur_len = 0;
	vring->rem_len = 0;
	vring->fifo->vring[0] = NULL;

	/*
	 * Make sure the load/store are in order before
	 * returning back to virtio.
	 */
	virtio_mb(false);

	/* Notify upper layer. */
	spin_lock_irqsave(&vring->fifo->spin_lock[0], flags);
	vring_interrupt(0, vring->vq);
	spin_unlock_irqrestore(&vring->fifo->spin_lock[0], flags);
}

/* Rx & Tx processing of a queue. */
static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
{
	int avail = 0, devid = vring->vdev_id;
	struct mlxbf_tmfifo *fifo;
	bool more;

	fifo = vring->fifo;

	/* Return if vdev is not ready. */
	if (!fifo || !fifo->vdev[devid])
		return;

	/* Return if another vring is running. */
	if (fifo->vring[is_rx] && fifo->vring[is_rx] != vring)
		return;

	/* Only handle console and network for now. */
	if (WARN_ON(devid != VIRTIO_ID_NET && devid != VIRTIO_ID_CONSOLE))
		return;

	do {
retry:
		/* Get available FIFO space. */
		if (avail == 0) {
			if (is_rx)
				avail = mlxbf_tmfifo_get_rx_avail(fifo);
			else
				avail = mlxbf_tmfifo_get_tx_avail(fifo, devid);
			if (avail <= 0)
				break;
		}

		/* Insert paddings for discarded Tx packet. */
		if (!is_rx) {
			vring->tx_timeout = 0;
			while (vring->rem_padding >= sizeof(u64)) {
				writeq(0, vring->fifo->tx.data);
				vring->rem_padding -= sizeof(u64);
				if (--avail == 0)
					goto retry;
			}
		}

		/* Console output always comes from the Tx buffer. */
		if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
			mlxbf_tmfifo_console_tx(fifo, avail);
			break;
		}

		/* Handle one descriptor. */
		more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
	} while (more);

	/* Check Tx timeout. */
	if (avail <= 0 && !is_rx)
		mlxbf_tmfifo_check_tx_timeout(vring);
}

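/*
 * Padding example (illustrative): if a Tx packet times out with
 * rem_len = 13, rem_padding is rounded up to 16, so the padding loop
 * above pushes two zero words into the FIFO to terminate the packet on
 * the peer side.
 */
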
/* Handle Rx or Tx queues. */
static void mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo *fifo, int queue_id,
				   int irq_id, bool is_rx)
{
	struct mlxbf_tmfifo_vdev *tm_vdev;
	struct mlxbf_tmfifo_vring *vring;
	int i;

	if (!test_and_clear_bit(irq_id, &fifo->pend_events) ||
	    !fifo->irq_info[irq_id].irq)
		return;

	for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++) {
		tm_vdev = fifo->vdev[i];
		if (tm_vdev) {
			vring = &tm_vdev->vrings[queue_id];
			if (vring->vq)
				mlxbf_tmfifo_rxtx(vring, is_rx);
		}
	}
}

/* Work handler for Rx and Tx case. */
static void mlxbf_tmfifo_work_handler(struct work_struct *work)
{
	struct mlxbf_tmfifo *fifo;

	fifo = container_of(work, struct mlxbf_tmfifo, work);
	if (!fifo->is_ready)
		return;

	mutex_lock(&fifo->lock);

	/* Tx (Send data to the TmFifo). */
	mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_TX,
			       MLXBF_TM_TX_LWM_IRQ, false);

	/* Rx (Receive data from the TmFifo). */
	mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_RX,
			       MLXBF_TM_RX_HWM_IRQ, true);

	mutex_unlock(&fifo->lock);
}

/* The notify function is called when new buffers are posted. */
static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
{
	struct mlxbf_tmfifo_vring *vring = vq->priv;
	struct mlxbf_tmfifo_vdev *tm_vdev;
	struct mlxbf_tmfifo *fifo;
	unsigned long flags;

	fifo = vring->fifo;

	/*
	 * Virtio maintains vrings in pairs: even-numbered rings are Rx
	 * and odd-numbered rings are Tx.
	 */
	if (vring->index & BIT(0)) {
		/*
		 * Console could make blocking call with interrupts disabled.
		 * In such case, the vring needs to be served right away. For
		 * other cases, just set the TX LWM bit to start Tx in the
		 * worker handler.
		 */
		if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
			spin_lock_irqsave(&fifo->spin_lock[0], flags);
			tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
			mlxbf_tmfifo_console_output(tm_vdev, vring);
			spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
			set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
		} else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
					    &fifo->pend_events)) {
			return true;
		}
	} else {
		if (test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events))
			return true;
	}

	schedule_work(&fifo->work);

	return true;
}

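/*
 * Note: spin_lock[0] serializes the console 'tx_buf' between the notify
 * path above, which copies descriptor data into the buffer and releases
 * the descriptors immediately (so a blocking console write can complete),
 * and mlxbf_tmfifo_console_tx(), which later drains the buffer into the
 * FIFO from the worker.
 */
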
/* Get the array of feature bits for this device. */
static u64 mlxbf_tmfifo_virtio_get_features(struct virtio_device *vdev)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	return tm_vdev->features;
}

/* Confirm device features to use. */
static int mlxbf_tmfifo_virtio_finalize_features(struct virtio_device *vdev)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	tm_vdev->features = vdev->features;

	return 0;
}

/* Free virtqueues found by find_vqs(). */
static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
	struct mlxbf_tmfifo_vring *vring;
	struct virtqueue *vq;
	int i;

	for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
		vring = &tm_vdev->vrings[i];

		/* Release the pending packet. */
		if (vring->desc)
			mlxbf_tmfifo_release_pkt(vring);
		vq = vring->vq;
		if (vq) {
			vring->vq = NULL;
			vring_del_virtqueue(vq);
		}
	}
}

/* Create and initialize the virtual queues. */
static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
					unsigned int nvqs,
					struct virtqueue *vqs[],
					struct virtqueue_info vqs_info[],
					struct irq_affinity *desc)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
	struct mlxbf_tmfifo_vring *vring;
	struct virtqueue *vq;
	int i, ret, size;

	if (nvqs > ARRAY_SIZE(tm_vdev->vrings))
		return -EINVAL;

	for (i = 0; i < nvqs; ++i) {
		struct virtqueue_info *vqi = &vqs_info[i];

		if (!vqi->name) {
			ret = -EINVAL;
			goto error;
		}
		vring = &tm_vdev->vrings[i];

		/* zero vring */
		size = vring_size(vring->num, vring->align);
		memset(vring->va, 0, size);
		vq = vring_new_virtqueue(i, vring->num, vring->align, vdev,
					 false, false, vring->va,
					 mlxbf_tmfifo_virtio_notify,
					 vqi->callback, vqi->name);
		if (!vq) {
			dev_err(&vdev->dev, "vring_new_virtqueue failed\n");
			ret = -ENOMEM;
			goto error;
		}

		vq->num_max = vring->num;

		vq->priv = vring;

		/* Make vq update visible before using it. */
		virtio_mb(false);

		vqs[i] = vq;
		vring->vq = vq;
	}

	return 0;

error:
	mlxbf_tmfifo_virtio_del_vqs(vdev);
	return ret;
}

/* Read the status byte. */
static u8 mlxbf_tmfifo_virtio_get_status(struct virtio_device *vdev)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	return tm_vdev->status;
}

/* Write the status byte. */
static void mlxbf_tmfifo_virtio_set_status(struct virtio_device *vdev,
					   u8 status)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	tm_vdev->status = status;
}

/* Reset the device. Not much here for now. */
static void mlxbf_tmfifo_virtio_reset(struct virtio_device *vdev)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	tm_vdev->status = 0;
}

/* Read the value of a configuration field. */
static void mlxbf_tmfifo_virtio_get(struct virtio_device *vdev,
				    unsigned int offset,
				    void *buf,
				    unsigned int len)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	if ((u64)offset + len > sizeof(tm_vdev->config))
		return;

	memcpy(buf, (u8 *)&tm_vdev->config + offset, len);
}

/* Write the value of a configuration field. */
static void mlxbf_tmfifo_virtio_set(struct virtio_device *vdev,
				    unsigned int offset,
				    const void *buf,
				    unsigned int len)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	if ((u64)offset + len > sizeof(tm_vdev->config))
		return;

	memcpy((u8 *)&tm_vdev->config + offset, buf, len);
}

static void tmfifo_virtio_dev_release(struct device *device)
{
	struct virtio_device *vdev =
			container_of(device, struct virtio_device, dev);
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	kfree(tm_vdev);
}

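/*
 * Lifetime note: a struct mlxbf_tmfifo_vdev is freed through this release
 * callback once the last reference on the embedded virtio device is
 * dropped; see the put_device() error path in mlxbf_tmfifo_create_vdev()
 * below.
 */
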
/* Virtio config operations. */
static const struct virtio_config_ops mlxbf_tmfifo_virtio_config_ops = {
	.get_features = mlxbf_tmfifo_virtio_get_features,
	.finalize_features = mlxbf_tmfifo_virtio_finalize_features,
	.find_vqs = mlxbf_tmfifo_virtio_find_vqs,
	.del_vqs = mlxbf_tmfifo_virtio_del_vqs,
	.reset = mlxbf_tmfifo_virtio_reset,
	.set_status = mlxbf_tmfifo_virtio_set_status,
	.get_status = mlxbf_tmfifo_virtio_get_status,
	.get = mlxbf_tmfifo_virtio_get,
	.set = mlxbf_tmfifo_virtio_set,
};

/* Create vdev for the FIFO. */
static int mlxbf_tmfifo_create_vdev(struct device *dev,
				    struct mlxbf_tmfifo *fifo,
				    int vdev_id, u64 features,
				    void *config, u32 size)
{
	struct mlxbf_tmfifo_vdev *tm_vdev, *reg_dev = NULL;
	int ret;

	mutex_lock(&fifo->lock);

	tm_vdev = fifo->vdev[vdev_id];
	if (tm_vdev) {
		dev_err(dev, "vdev %d already exists\n", vdev_id);
		ret = -EEXIST;
		goto fail;
	}

	tm_vdev = kzalloc(sizeof(*tm_vdev), GFP_KERNEL);
	if (!tm_vdev) {
		ret = -ENOMEM;
		goto fail;
	}

	tm_vdev->vdev.id.device = vdev_id;
	tm_vdev->vdev.config = &mlxbf_tmfifo_virtio_config_ops;
	tm_vdev->vdev.dev.parent = dev;
	tm_vdev->vdev.dev.release = tmfifo_virtio_dev_release;
	tm_vdev->features = features;
	if (config)
		memcpy(&tm_vdev->config, config, size);

	if (mlxbf_tmfifo_alloc_vrings(fifo, tm_vdev)) {
		dev_err(dev, "unable to allocate vring\n");
		ret = -ENOMEM;
		goto vdev_fail;
	}

	/* Allocate an output buffer for the console device. */
	if (vdev_id == VIRTIO_ID_CONSOLE)
		tm_vdev->tx_buf.buf = devm_kmalloc(dev,
						   MLXBF_TMFIFO_CON_TX_BUF_SIZE,
						   GFP_KERNEL);
	fifo->vdev[vdev_id] = tm_vdev;

	/* Register the virtio device. */
	ret = register_virtio_device(&tm_vdev->vdev);
	reg_dev = tm_vdev;
	if (ret) {
		dev_err(dev, "register_virtio_device failed\n");
		goto vdev_fail;
	}

	mutex_unlock(&fifo->lock);
	return 0;

vdev_fail:
	mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
	fifo->vdev[vdev_id] = NULL;
	if (reg_dev)
		put_device(&tm_vdev->vdev.dev);
	else
		kfree(tm_vdev);
fail:
	mutex_unlock(&fifo->lock);
	return ret;
}

/* Delete vdev for the FIFO. */
static int mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo *fifo, int vdev_id)
{
	struct mlxbf_tmfifo_vdev *tm_vdev;

	mutex_lock(&fifo->lock);

	/* Unregister vdev. */
	tm_vdev = fifo->vdev[vdev_id];
	if (tm_vdev) {
		unregister_virtio_device(&tm_vdev->vdev);
		mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
		fifo->vdev[vdev_id] = NULL;
	}

	mutex_unlock(&fifo->lock);

	return 0;
}

/* Read the configured network MAC address from efi variable. */
static void mlxbf_tmfifo_get_cfg_mac(u8 *mac)
{
	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
	unsigned long size = ETH_ALEN;
	u8 buf[ETH_ALEN];
	efi_status_t rc;

	rc = efi.get_variable(mlxbf_tmfifo_efi_name, &guid, NULL, &size, buf);
	if (rc == EFI_SUCCESS && size == ETH_ALEN)
		ether_addr_copy(mac, buf);
	else
		ether_addr_copy(mac, mlxbf_tmfifo_net_default_mac);
}

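/*
 * Illustrative usage (assuming efivarfs is mounted): the MAC can be
 * provisioned from user space through
 *   /sys/firmware/efi/efivars/RshimMacAddr-8be4df61-93ca-11d2-aa0d-00e098032b8c
 * (the EFI global-variable GUID), where the file payload is the 4-byte
 * attribute word followed by the 6 MAC bytes.
 */
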
/* Set the TmFifo thresholds used to trigger interrupts. */
static void mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo *fifo)
{
	u64 ctl;

	/* Get Tx FIFO size and set the low/high watermark. */
	ctl = readq(fifo->tx.ctl);
	fifo->tx_fifo_size =
		FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK, ctl);
	ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__LWM_MASK) |
		FIELD_PREP(MLXBF_TMFIFO_TX_CTL__LWM_MASK,
			   fifo->tx_fifo_size / 2);
	ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__HWM_MASK) |
		FIELD_PREP(MLXBF_TMFIFO_TX_CTL__HWM_MASK,
			   fifo->tx_fifo_size - 1);
	writeq(ctl, fifo->tx.ctl);

	/* Get Rx FIFO size and set the low/high watermark. */
	ctl = readq(fifo->rx.ctl);
	fifo->rx_fifo_size =
		FIELD_GET(MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK, ctl);
	ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__LWM_MASK) |
		FIELD_PREP(MLXBF_TMFIFO_RX_CTL__LWM_MASK, 0);
	ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__HWM_MASK) |
		FIELD_PREP(MLXBF_TMFIFO_RX_CTL__HWM_MASK, 1);
	writeq(ctl, fifo->rx.ctl);
}

static void mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo *fifo)
{
	int i;

	fifo->is_ready = false;
	timer_delete_sync(&fifo->timer);
	mlxbf_tmfifo_disable_irqs(fifo);
	cancel_work_sync(&fifo->work);
	for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++)
		mlxbf_tmfifo_delete_vdev(fifo, i);
}

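/*
 * Teardown ordering: 'is_ready' is cleared first so a queued worker exits
 * early, then the timer, interrupts and work are stopped before the
 * virtual devices and their vrings are destroyed.
 */
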
/* Probe the TMFIFO. */
static int mlxbf_tmfifo_probe(struct platform_device *pdev)
{
	struct virtio_net_config net_config;
	struct device *dev = &pdev->dev;
	struct mlxbf_tmfifo *fifo;
	u64 dev_id;
	int i, rc;

	rc = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &dev_id);
	if (rc) {
		dev_err(dev, "Cannot retrieve UID\n");
		return rc;
	}

	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;

	spin_lock_init(&fifo->spin_lock[0]);
	spin_lock_init(&fifo->spin_lock[1]);
	INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
	mutex_init(&fifo->lock);

	/* Get the resource of the Rx FIFO. */
	fifo->res0 = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(fifo->res0))
		return PTR_ERR(fifo->res0);

	/* Get the resource of the Tx FIFO. */
	fifo->res1 = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(fifo->res1))
		return PTR_ERR(fifo->res1);

	if (dev_id == TMFIFO_BF3_UID) {
		fifo->rx.ctl = fifo->res1 + MLXBF_TMFIFO_RX_CTL_BF3;
		fifo->rx.sts = fifo->res1 + MLXBF_TMFIFO_RX_STS_BF3;
		fifo->rx.data = fifo->res0 + MLXBF_TMFIFO_RX_DATA_BF3;
		fifo->tx.ctl = fifo->res1 + MLXBF_TMFIFO_TX_CTL_BF3;
		fifo->tx.sts = fifo->res1 + MLXBF_TMFIFO_TX_STS_BF3;
		fifo->tx.data = fifo->res0 + MLXBF_TMFIFO_TX_DATA_BF3;
	} else {
		fifo->rx.ctl = fifo->res0 + MLXBF_TMFIFO_RX_CTL;
		fifo->rx.sts = fifo->res0 + MLXBF_TMFIFO_RX_STS;
		fifo->rx.data = fifo->res0 + MLXBF_TMFIFO_RX_DATA;
		fifo->tx.ctl = fifo->res1 + MLXBF_TMFIFO_TX_CTL;
		fifo->tx.sts = fifo->res1 + MLXBF_TMFIFO_TX_STS;
		fifo->tx.data = fifo->res1 + MLXBF_TMFIFO_TX_DATA;
	}

	platform_set_drvdata(pdev, fifo);

	timer_setup(&fifo->timer, mlxbf_tmfifo_timer, 0);

	for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
		fifo->irq_info[i].index = i;
		fifo->irq_info[i].fifo = fifo;
		fifo->irq_info[i].irq = platform_get_irq(pdev, i);
		rc = devm_request_irq(dev, fifo->irq_info[i].irq,
				      mlxbf_tmfifo_irq_handler, 0,
				      "tmfifo", &fifo->irq_info[i]);
		if (rc) {
			dev_err(dev, "devm_request_irq failed\n");
			fifo->irq_info[i].irq = 0;
			return rc;
		}
	}

	mlxbf_tmfifo_set_threshold(fifo);

	/* Create the console vdev. */
	rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_CONSOLE, 0, NULL, 0);
	if (rc)
		goto fail;

	/* Create the network vdev. */
	memset(&net_config, 0, sizeof(net_config));

	/* A legacy-only interface for now. */
	net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
					   ETH_DATA_LEN);
	net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
					      VIRTIO_NET_S_LINK_UP);
	mlxbf_tmfifo_get_cfg_mac(net_config.mac);
	rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
				      MLXBF_TMFIFO_NET_FEATURES, &net_config,
				      sizeof(net_config));
	if (rc)
		goto fail;

	mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);

	/* Make all updates visible before setting the 'is_ready' flag. */
	virtio_mb(false);

	fifo->is_ready = true;
	return 0;

fail:
	mlxbf_tmfifo_cleanup(fifo);
	return rc;
}

/* Device remove function. */
static void mlxbf_tmfifo_remove(struct platform_device *pdev)
{
	struct mlxbf_tmfifo *fifo = platform_get_drvdata(pdev);

	mlxbf_tmfifo_cleanup(fifo);
}

static const struct acpi_device_id mlxbf_tmfifo_acpi_match[] = {
	{ "MLNXBF01", 0 },
	{}
};
MODULE_DEVICE_TABLE(acpi, mlxbf_tmfifo_acpi_match);

static struct platform_driver mlxbf_tmfifo_driver = {
	.probe = mlxbf_tmfifo_probe,
	.remove = mlxbf_tmfifo_remove,
	.driver = {
		.name = "bf-tmfifo",
		.acpi_match_table = mlxbf_tmfifo_acpi_match,
	},
};

module_platform_driver(mlxbf_tmfifo_driver);

MODULE_DESCRIPTION("Mellanox BlueField SoC TmFifo Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mellanox Technologies");