// SPDX-License-Identifier: GPL-2.0+
/*
 * Mellanox BlueField SoC TmFifo driver
 *
 * Copyright (C) 2019 Mellanox Technologies
 */

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/efi.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include <linux/virtio_config.h>
#include <linux/virtio_console.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>

#include "mlxbf-tmfifo-regs.h"

/* Vring size. */
#define MLXBF_TMFIFO_VRING_SIZE			SZ_1K

/* Console Tx buffer size. */
#define MLXBF_TMFIFO_CON_TX_BUF_SIZE		SZ_32K

/* Console Tx buffer reserved space. */
#define MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE	8

/* House-keeping timer interval. */
#define MLXBF_TMFIFO_TIMER_INTERVAL		(HZ / 10)

/* Virtual devices sharing the TM FIFO. */
#define MLXBF_TMFIFO_VDEV_MAX			(VIRTIO_ID_CONSOLE + 1)

/*
 * Reserve 1/16 of TmFifo space, so console messages are not starved by
 * the networking traffic.
 */
#define MLXBF_TMFIFO_RESERVE_RATIO		16

/* Message with data needs at least two words (for header & data). */
#define MLXBF_TMFIFO_DATA_MIN_WORDS		2

/* Tx timeout in milliseconds. */
#define TMFIFO_TX_TIMEOUT			2000

/* ACPI UID for BlueField-3. */
#define TMFIFO_BF3_UID				1

struct mlxbf_tmfifo;

/**
 * struct mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring
 * @va: virtual address of the ring
 * @dma: dma address of the ring
 * @vq: pointer to the virtio virtqueue
 * @desc: current descriptor of the pending packet
 * @desc_head: head descriptor of the pending packet
 * @drop_desc: dummy desc for packet dropping
 * @cur_len: processed length of the current descriptor
 * @rem_len: remaining length of the pending packet
 * @rem_padding: remaining bytes to send as paddings
 * @pkt_len: total length of the pending packet
 * @next_avail: next avail descriptor id
 * @num: vring size (number of descriptors)
 * @align: vring alignment size
 * @index: vring index
 * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
 * @tx_timeout: expire time of last tx packet
 * @fifo: pointer to the tmfifo structure
 */
struct mlxbf_tmfifo_vring {
	void *va;
	dma_addr_t dma;
	struct virtqueue *vq;
	struct vring_desc *desc;
	struct vring_desc *desc_head;
	struct vring_desc drop_desc;
	int cur_len;
	int rem_len;
	int rem_padding;
	u32 pkt_len;
	u16 next_avail;
	int num;
	int align;
	int index;
	int vdev_id;
	unsigned long tx_timeout;
	struct mlxbf_tmfifo *fifo;
};

/* Check whether vring is in drop mode. */
#define IS_VRING_DROP(_r) ({ \
	typeof(_r) (r) = (_r); \
	r->desc_head == &r->drop_desc; })

/* A stub length to drop maximum length packet. */
#define VRING_DROP_DESC_MAX_LEN		GENMASK(15, 0)
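/*
 * Note on drop mode: when an incoming packet cannot be matched to a posted
 * Rx buffer (no descriptor available, or the frame exceeds the configured
 * MTU plus L2 overhead), 'desc_head' is pointed at 'drop_desc' above. The
 * Rx path then keeps draining words from the FIFO so the byte stream stays
 * in sync, but skips the memcpy into guest buffers until the packet ends.
 */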
/* Interrupt types. */
enum {
	MLXBF_TM_RX_LWM_IRQ,
	MLXBF_TM_RX_HWM_IRQ,
	MLXBF_TM_TX_LWM_IRQ,
	MLXBF_TM_TX_HWM_IRQ,
	MLXBF_TM_MAX_IRQ
};

/* Ring types (Rx & Tx). */
enum {
	MLXBF_TMFIFO_VRING_RX,
	MLXBF_TMFIFO_VRING_TX,
	MLXBF_TMFIFO_VRING_MAX
};

/**
 * struct mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device
 * @vdev: virtio device, in which the vdev.id.device field has the
 *        VIRTIO_ID_xxx id to distinguish the virtual device.
 * @status: status of the device
 * @features: supported features of the device
 * @vrings: array of tmfifo vrings of this device
 * @config: non-anonymous union for cons and net
 * @config.cons: virtual console config -
 *               select if vdev.id.device is VIRTIO_ID_CONSOLE
 * @config.net: virtual network config -
 *              select if vdev.id.device is VIRTIO_ID_NET
 * @tx_buf: tx buffer used to buffer data before writing into the FIFO
 */
struct mlxbf_tmfifo_vdev {
	struct virtio_device vdev;
	u8 status;
	u64 features;
	struct mlxbf_tmfifo_vring vrings[MLXBF_TMFIFO_VRING_MAX];
	union {
		struct virtio_console_config cons;
		struct virtio_net_config net;
	} config;
	struct circ_buf tx_buf;
};

/**
 * struct mlxbf_tmfifo_irq_info - Structure of the interrupt information
 * @fifo: pointer to the tmfifo structure
 * @irq: interrupt number
 * @index: index into the interrupt array
 */
struct mlxbf_tmfifo_irq_info {
	struct mlxbf_tmfifo *fifo;
	int irq;
	int index;
};

/**
 * struct mlxbf_tmfifo_io - Structure of the TmFifo IO resource (for both rx & tx)
 * @ctl: control register offset (TMFIFO_RX_CTL / TMFIFO_TX_CTL)
 * @sts: status register offset (TMFIFO_RX_STS / TMFIFO_TX_STS)
 * @data: data register offset (TMFIFO_RX_DATA / TMFIFO_TX_DATA)
 */
struct mlxbf_tmfifo_io {
	void __iomem *ctl;
	void __iomem *sts;
	void __iomem *data;
};

/**
 * struct mlxbf_tmfifo - Structure of the TmFifo
 * @vdev: array of the virtual devices running over the TmFifo
 * @lock: lock to protect the TmFifo access
 * @res0: mapped resource block 0
 * @res1: mapped resource block 1
 * @rx: rx io resource
 * @tx: tx io resource
 * @rx_fifo_size: number of entries of the Rx FIFO
 * @tx_fifo_size: number of entries of the Tx FIFO
 * @pend_events: pending bits for deferred events
 * @irq_info: interrupt information
 * @work: work struct for deferred process
 * @timer: background timer
 * @vring: Tx/Rx ring
 * @spin_lock: Tx/Rx spin lock
 * @is_ready: ready flag
 */
struct mlxbf_tmfifo {
	struct mlxbf_tmfifo_vdev *vdev[MLXBF_TMFIFO_VDEV_MAX];
	struct mutex lock;		/* TmFifo lock */
	void __iomem *res0;
	void __iomem *res1;
	struct mlxbf_tmfifo_io rx;
	struct mlxbf_tmfifo_io tx;
	int rx_fifo_size;
	int tx_fifo_size;
	unsigned long pend_events;
	struct mlxbf_tmfifo_irq_info irq_info[MLXBF_TM_MAX_IRQ];
	struct work_struct work;
	struct timer_list timer;
	struct mlxbf_tmfifo_vring *vring[2];
	spinlock_t spin_lock[2];	/* spin lock */
	bool is_ready;
};

/**
 * struct mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header
 * @type: message type
 * @len: payload length in network byte order. Messages sent into the FIFO
 *       will be read by the other side as data stream in the same byte order.
 *       The length needs to be encoded into network order so both sides
 *       could understand it.
 */
struct mlxbf_tmfifo_msg_hdr {
	u8 type;
	__be16 len;
	/* private: */
	u8 unused[5];
} __packed __aligned(sizeof(u64));
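/*
 * Wire format sketch (derived from the Rx/Tx code in this file): every
 * message is a sequence of 64-bit FIFO words, starting with one header
 * word followed by the payload padded up to a word boundary, e.g. for a
 * 5-byte console write:
 *
 *   word 0: | type = VIRTIO_ID_CONSOLE | len = htons(5) | 5 unused bytes |
 *   word 1: | 5 payload bytes | 3 trailing bytes ignored by the receiver |
 *
 * The receiver only consumes ntohs(len) payload bytes, so the trailing
 * bytes of the last word carry no information.
 */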
/*
 * Default MAC.
 * This MAC address will be read from EFI persistent variable if configured.
 * It can also be reconfigured with standard Linux tools.
 */
static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
	0x00, 0x1A, 0xCA, 0xFF, 0xFF, 0x01
};

/* EFI variable name of the MAC address. */
static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";

/* Maximum L2 header length. */
#define MLXBF_TMFIFO_NET_L2_OVERHEAD	(ETH_HLEN + VLAN_HLEN)

/* Supported virtio-net features. */
#define MLXBF_TMFIFO_NET_FEATURES \
	(BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_STATUS) | \
	 BIT_ULL(VIRTIO_NET_F_MAC))

#define mlxbf_vdev_to_tmfifo(d)	container_of(d, struct mlxbf_tmfifo_vdev, vdev)

/* Free vrings of the FIFO device. */
static void mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo *fifo,
				     struct mlxbf_tmfifo_vdev *tm_vdev)
{
	struct mlxbf_tmfifo_vring *vring;
	int i, size;

	for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
		vring = &tm_vdev->vrings[i];
		if (vring->va) {
			size = vring_size(vring->num, vring->align);
			dma_free_coherent(tm_vdev->vdev.dev.parent, size,
					  vring->va, vring->dma);
			vring->va = NULL;
			if (vring->vq) {
				vring_del_virtqueue(vring->vq);
				vring->vq = NULL;
			}
		}
	}
}

/* Allocate vrings for the FIFO. */
static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
				     struct mlxbf_tmfifo_vdev *tm_vdev)
{
	struct mlxbf_tmfifo_vring *vring;
	struct device *dev;
	dma_addr_t dma;
	int i, size;
	void *va;

	for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
		vring = &tm_vdev->vrings[i];
		vring->fifo = fifo;
		vring->num = MLXBF_TMFIFO_VRING_SIZE;
		vring->align = SMP_CACHE_BYTES;
		vring->index = i;
		vring->vdev_id = tm_vdev->vdev.id.device;
		vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
		dev = &tm_vdev->vdev.dev;

		size = vring_size(vring->num, vring->align);
		va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
		if (!va) {
			mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
			dev_err(dev->parent, "dma_alloc_coherent failed\n");
			return -ENOMEM;
		}

		vring->va = va;
		vring->dma = dma;
	}

	return 0;
}

/* Disable interrupts of the FIFO device. */
static void mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo *fifo)
{
	int i, irq;

	for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
		irq = fifo->irq_info[i].irq;
		fifo->irq_info[i].irq = 0;
		disable_irq(irq);
	}
}

/* Interrupt handler. */
static irqreturn_t mlxbf_tmfifo_irq_handler(int irq, void *arg)
{
	struct mlxbf_tmfifo_irq_info *irq_info = arg;

	if (!test_and_set_bit(irq_info->index, &irq_info->fifo->pend_events))
		schedule_work(&irq_info->fifo->work);

	return IRQ_HANDLED;
}

/* Get the next packet descriptor from the vring. */
static struct vring_desc *
mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct virtio_device *vdev = vring->vq->vdev;
	unsigned int idx, head;

	if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
		return NULL;

	/* Make sure 'avail->idx' is visible already. */
	virtio_rmb(false);

	idx = vring->next_avail % vr->num;
	head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
	if (WARN_ON(head >= vr->num))
		return NULL;

	vring->next_avail++;

	return &vr->desc[head];
}
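/*
 * 'next_avail' above is a free-running 16-bit index, like the ring's own
 * 'avail->idx': both wrap at 65536 rather than at the ring size, so they
 * compare equal exactly when the ring is fully consumed, and the actual
 * slot is obtained with 'next_avail % vr->num'.
 */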
/* Release virtio descriptor. */
static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
				      struct vring_desc *desc, u32 len)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct virtio_device *vdev = vring->vq->vdev;
	u16 idx, vr_idx;

	vr_idx = virtio16_to_cpu(vdev, vr->used->idx);
	idx = vr_idx % vr->num;
	vr->used->ring[idx].id = cpu_to_virtio32(vdev, desc - vr->desc);
	vr->used->ring[idx].len = cpu_to_virtio32(vdev, len);

	/*
	 * Virtio could poll and check the 'idx' to decide whether the desc is
	 * done or not. Add a memory barrier here to make sure the update above
	 * completes before updating the idx.
	 */
	virtio_mb(false);
	vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
}

/* Get the total length of the descriptor chain. */
static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
				    struct vring_desc *desc)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct virtio_device *vdev = vring->vq->vdev;
	u32 len = 0, idx;

	while (desc) {
		len += virtio32_to_cpu(vdev, desc->len);
		if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
			break;
		idx = virtio16_to_cpu(vdev, desc->next);
		desc = &vr->desc[idx];
	}

	return len;
}

static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
{
	struct vring_desc *desc_head;
	u32 len = 0;

	if (vring->desc_head) {
		desc_head = vring->desc_head;
		len = vring->pkt_len;
	} else {
		desc_head = mlxbf_tmfifo_get_next_desc(vring);
		len = mlxbf_tmfifo_get_pkt_len(vring, desc_head);
	}

	if (desc_head)
		mlxbf_tmfifo_release_desc(vring, desc_head, len);

	vring->pkt_len = 0;
	vring->desc = NULL;
	vring->desc_head = NULL;
}

static void mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring *vring,
				       struct vring_desc *desc, bool is_rx)
{
	struct virtio_device *vdev = vring->vq->vdev;
	struct virtio_net_hdr *net_hdr;

	net_hdr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
	memset(net_hdr, 0, sizeof(*net_hdr));
}

/* Get and initialize the next packet. */
static struct vring_desc *
mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring *vring, bool is_rx)
{
	struct vring_desc *desc;

	desc = mlxbf_tmfifo_get_next_desc(vring);
	if (desc && is_rx && vring->vdev_id == VIRTIO_ID_NET)
		mlxbf_tmfifo_init_net_desc(vring, desc, is_rx);

	vring->desc_head = desc;
	vring->desc = desc;

	return desc;
}

/* House-keeping timer. */
static void mlxbf_tmfifo_timer(struct timer_list *t)
{
	struct mlxbf_tmfifo *fifo = container_of(t, struct mlxbf_tmfifo, timer);
	int rx, tx;

	rx = !test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events);
	tx = !test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);

	if (rx || tx)
		schedule_work(&fifo->work);

	mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
}
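/*
 * The timer above acts as a polling fallback for the watermark interrupts:
 * every MLXBF_TMFIFO_TIMER_INTERVAL (HZ / 10, i.e. 100 ms) it pretends both
 * the Rx high-watermark and Tx low-watermark events fired, so the worker
 * re-scans the queues even if an interrupt was missed or buffers were
 * posted while the FIFO state did not change.
 */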
/* Copy one console packet into the output buffer. */
static void mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev *cons,
					    struct mlxbf_tmfifo_vring *vring,
					    struct vring_desc *desc)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct virtio_device *vdev = &cons->vdev;
	u32 len, idx, seg;
	void *addr;

	while (desc) {
		addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
		len = virtio32_to_cpu(vdev, desc->len);

		seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
					MLXBF_TMFIFO_CON_TX_BUF_SIZE);
		if (len <= seg) {
			memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, len);
		} else {
			memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg);
			addr += seg;
			memcpy(cons->tx_buf.buf, addr, len - seg);
		}
		cons->tx_buf.head = (cons->tx_buf.head + len) %
			MLXBF_TMFIFO_CON_TX_BUF_SIZE;

		if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
			break;
		idx = virtio16_to_cpu(vdev, desc->next);
		desc = &vr->desc[idx];
	}
}

/* Copy console data into the output buffer. */
static void mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev *cons,
					struct mlxbf_tmfifo_vring *vring)
{
	struct vring_desc *desc;
	u32 len, avail;

	desc = mlxbf_tmfifo_get_next_desc(vring);
	while (desc) {
		/* Release the packet if not enough space. */
		len = mlxbf_tmfifo_get_pkt_len(vring, desc);
		avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail,
				   MLXBF_TMFIFO_CON_TX_BUF_SIZE);
		if (len + MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE > avail) {
			mlxbf_tmfifo_release_desc(vring, desc, len);
			break;
		}

		mlxbf_tmfifo_console_output_one(cons, vring, desc);
		mlxbf_tmfifo_release_desc(vring, desc, len);
		desc = mlxbf_tmfifo_get_next_desc(vring);
	}
}

/* Get the number of available words in Rx FIFO for receiving. */
static int mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo *fifo)
{
	u64 sts;

	sts = readq(fifo->rx.sts);
	return FIELD_GET(MLXBF_TMFIFO_RX_STS__COUNT_MASK, sts);
}

/* Get the number of available words in the TmFifo for sending. */
static int mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo *fifo, int vdev_id)
{
	int tx_reserve;
	u32 count;
	u64 sts;

	/* Reserve some room in FIFO for console messages. */
	if (vdev_id == VIRTIO_ID_NET)
		tx_reserve = fifo->tx_fifo_size / MLXBF_TMFIFO_RESERVE_RATIO;
	else
		tx_reserve = 1;

	sts = readq(fifo->tx.sts);
	count = FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK, sts);
	return fifo->tx_fifo_size - tx_reserve - count;
}
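/*
 * Worked example for the reservation above: with a (hypothetical) 256-entry
 * Tx FIFO that already holds 100 words, the network vdev sees
 * 256 - (256 / 16) - 100 = 140 free words, while the console reserves only
 * a single word and sees 155. The 1/16 set-aside keeps console output
 * flowing even when networking saturates the FIFO.
 */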
/* Console Tx (move data from the output buffer into the TmFifo). */
static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
{
	struct mlxbf_tmfifo_msg_hdr hdr;
	struct mlxbf_tmfifo_vdev *cons;
	unsigned long flags;
	int size, seg;
	void *addr;
	u64 data;

	/* Return if not enough space available. */
	if (avail < MLXBF_TMFIFO_DATA_MIN_WORDS)
		return;

	cons = fifo->vdev[VIRTIO_ID_CONSOLE];
	if (!cons || !cons->tx_buf.buf)
		return;

	/* Return if no data to send. */
	size = CIRC_CNT(cons->tx_buf.head, cons->tx_buf.tail,
			MLXBF_TMFIFO_CON_TX_BUF_SIZE);
	if (size == 0)
		return;

	/* Adjust the size to available space. */
	if (size + sizeof(hdr) > avail * sizeof(u64))
		size = avail * sizeof(u64) - sizeof(hdr);

	/* Write header. */
	hdr.type = VIRTIO_ID_CONSOLE;
	hdr.len = htons(size);
	writeq(*(u64 *)&hdr, fifo->tx.data);

	/* Use spin-lock to protect the 'cons->tx_buf'. */
	spin_lock_irqsave(&fifo->spin_lock[0], flags);

	while (size > 0) {
		addr = cons->tx_buf.buf + cons->tx_buf.tail;

		seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
				      MLXBF_TMFIFO_CON_TX_BUF_SIZE);
		if (seg >= sizeof(u64)) {
			memcpy(&data, addr, sizeof(u64));
		} else {
			memcpy(&data, addr, seg);
			memcpy((u8 *)&data + seg, cons->tx_buf.buf,
			       sizeof(u64) - seg);
		}
		writeq(data, fifo->tx.data);

		if (size >= sizeof(u64)) {
			cons->tx_buf.tail = (cons->tx_buf.tail + sizeof(u64)) %
				MLXBF_TMFIFO_CON_TX_BUF_SIZE;
			size -= sizeof(u64);
		} else {
			cons->tx_buf.tail = (cons->tx_buf.tail + size) %
				MLXBF_TMFIFO_CON_TX_BUF_SIZE;
			size = 0;
		}
	}

	spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
}

/* Rx/Tx one word in the descriptor buffer. */
static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
				   struct vring_desc *desc,
				   bool is_rx, int len)
{
	struct virtio_device *vdev = vring->vq->vdev;
	struct mlxbf_tmfifo *fifo = vring->fifo;
	void *addr;
	u64 data;

	/* Get the buffer address of this desc. */
	addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));

	/* Read a word from FIFO for Rx. */
	if (is_rx)
		data = readq(fifo->rx.data);

	if (vring->cur_len + sizeof(u64) <= len) {
		/* The whole word. */
		if (is_rx) {
			if (!IS_VRING_DROP(vring))
				memcpy(addr + vring->cur_len, &data,
				       sizeof(u64));
		} else {
			memcpy(&data, addr + vring->cur_len,
			       sizeof(u64));
		}
		vring->cur_len += sizeof(u64);
	} else {
		/* Leftover bytes. */
		if (is_rx) {
			if (!IS_VRING_DROP(vring))
				memcpy(addr + vring->cur_len, &data,
				       len - vring->cur_len);
		} else {
			data = 0;
			memcpy(&data, addr + vring->cur_len,
			       len - vring->cur_len);
		}
		vring->cur_len = len;
	}

	/* Write the word into FIFO for Tx. */
	if (!is_rx)
		writeq(data, fifo->tx.data);
}
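/*
 * Note that payload words are moved verbatim: only the 16-bit length in the
 * message header is explicitly network byte order, and both sides are
 * expected to interpret the byte stream identically. A short final word is
 * zero-padded on Tx (the 'data = 0' above) while Rx simply ignores the
 * bytes beyond 'len'.
 */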
/*
 * Rx/Tx packet header.
 *
 * In Rx case, the packet might be found to belong to a different vring since
 * the TmFifo is shared by different services. In such a case, the
 * 'vring_change' flag is set.
 */
static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
				     struct vring_desc **desc,
				     bool is_rx, bool *vring_change)
{
	struct mlxbf_tmfifo *fifo = vring->fifo;
	struct virtio_net_config *config;
	struct mlxbf_tmfifo_msg_hdr hdr;
	int vdev_id, hdr_len;
	bool drop_rx = false;

	/* Read/Write packet header. */
	if (is_rx) {
		/* Drain one word from the FIFO. */
		*(u64 *)&hdr = readq(fifo->rx.data);

		/* Skip the length 0 packets (keepalive). */
		if (hdr.len == 0)
			return;

		/* Check packet type. */
		if (hdr.type == VIRTIO_ID_NET) {
			vdev_id = VIRTIO_ID_NET;
			hdr_len = sizeof(struct virtio_net_hdr);
			config = &fifo->vdev[vdev_id]->config.net;
			/* A legacy-only interface for now. */
			if (ntohs(hdr.len) >
			    __virtio16_to_cpu(virtio_legacy_is_little_endian(),
					      config->mtu) +
			    MLXBF_TMFIFO_NET_L2_OVERHEAD)
				drop_rx = true;
		} else {
			vdev_id = VIRTIO_ID_CONSOLE;
			hdr_len = 0;
		}

		/*
		 * Check whether the new packet still belongs to this vring.
		 * If not, update the pkt_len of the new vring.
		 */
		if (vdev_id != vring->vdev_id) {
			struct mlxbf_tmfifo_vdev *tm_dev2 = fifo->vdev[vdev_id];

			if (!tm_dev2)
				return;
			vring->desc = *desc;
			vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
			*vring_change = true;
		}

		if (drop_rx && !IS_VRING_DROP(vring)) {
			if (vring->desc_head)
				mlxbf_tmfifo_release_pkt(vring);
			*desc = &vring->drop_desc;
			vring->desc_head = *desc;
			vring->desc = *desc;
		}

		vring->pkt_len = ntohs(hdr.len) + hdr_len;
	} else {
		/* Network virtio has an extra header. */
		hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
			   sizeof(struct virtio_net_hdr) : 0;
		vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
		hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
			    VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
		hdr.len = htons(vring->pkt_len - hdr_len);
		writeq(*(u64 *)&hdr, fifo->tx.data);
	}

	vring->cur_len = hdr_len;
	vring->rem_len = vring->pkt_len;
	fifo->vring[is_rx] = vring;
}

/*
 * Rx/Tx one descriptor.
 *
 * Return true to indicate more data available.
 */
static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
				       bool is_rx, int *avail)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct mlxbf_tmfifo *fifo = vring->fifo;
	struct virtio_device *vdev;
	bool vring_change = false;
	struct vring_desc *desc;
	unsigned long flags;
	u32 len, idx;

	vdev = &fifo->vdev[vring->vdev_id]->vdev;

	/* Get the descriptor of the next packet. */
	if (!vring->desc) {
		desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
		if (!desc) {
			/* Drop the next Rx packet to avoid getting stuck. */
			if (is_rx) {
				desc = &vring->drop_desc;
				vring->desc_head = desc;
				vring->desc = desc;
			} else {
				return false;
			}
		}
	} else {
		desc = vring->desc;
	}

	/* Beginning of a packet. Start to Rx/Tx packet header. */
	if (vring->pkt_len == 0) {
		mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
		(*avail)--;

		/* Return if new packet is for another ring. */
		if (vring_change)
			return false;
		goto mlxbf_tmfifo_desc_done;
	}

	/* Get the length of this desc. */
	len = virtio32_to_cpu(vdev, desc->len);
	if (len > vring->rem_len)
		len = vring->rem_len;

	/* Rx/Tx one word (8 bytes) if not done. */
	if (vring->cur_len < len) {
		mlxbf_tmfifo_rxtx_word(vring, desc, is_rx, len);
		(*avail)--;
	}

	/* Check again whether it's done. */
	if (vring->cur_len == len) {
		vring->cur_len = 0;
		vring->rem_len -= len;

		/* Get the next desc on the chain. */
		if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
		    (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
			idx = virtio16_to_cpu(vdev, desc->next);
			desc = &vr->desc[idx];
			goto mlxbf_tmfifo_desc_done;
		}

		/* Done and release the packet. */
		desc = NULL;
		fifo->vring[is_rx] = NULL;
		if (!IS_VRING_DROP(vring)) {
			mlxbf_tmfifo_release_pkt(vring);
		} else {
			vring->pkt_len = 0;
			vring->desc_head = NULL;
			vring->desc = NULL;
			return false;
		}

		/*
		 * Make sure the load/store are in order before
		 * returning back to virtio.
		 */
		virtio_mb(false);

		/* Notify upper layer that packet is done. */
		spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
		vring_interrupt(0, vring->vq);
		spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
	}

mlxbf_tmfifo_desc_done:
	/* Save the current desc. */
	vring->desc = desc;

	return true;
}

static void mlxbf_tmfifo_check_tx_timeout(struct mlxbf_tmfifo_vring *vring)
{
	unsigned long flags;

	/* Only handle Tx timeout for network vdev. */
	if (vring->vdev_id != VIRTIO_ID_NET)
		return;

	/* Initialize the timeout or return if not expired. */
	if (!vring->tx_timeout) {
		/* Initialize the timeout. */
		vring->tx_timeout = jiffies +
			msecs_to_jiffies(TMFIFO_TX_TIMEOUT);
		return;
	} else if (time_before(jiffies, vring->tx_timeout)) {
		/* Return if not timeout yet. */
		return;
	}

	/*
	 * Drop the packet after timeout. The outstanding packet is
	 * released and the remaining bytes will be sent with padding byte 0x00
	 * as a recovery. On the peer (host) side, the padding bytes 0x00 will
	 * either be dropped directly, or appended into an existing outstanding
	 * packet and thus dropped as a corrupted network packet.
	 */
	vring->rem_padding = round_up(vring->rem_len, sizeof(u64));
	mlxbf_tmfifo_release_pkt(vring);
	vring->cur_len = 0;
	vring->rem_len = 0;
	vring->fifo->vring[0] = NULL;

	/*
	 * Make sure the load/store are in order before
	 * returning back to virtio.
	 */
	virtio_mb(false);

	/* Notify upper layer. */
	spin_lock_irqsave(&vring->fifo->spin_lock[0], flags);
	vring_interrupt(0, vring->vq);
	spin_unlock_irqrestore(&vring->fifo->spin_lock[0], flags);
}

/* Rx & Tx processing of a queue. */
static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
{
	int avail = 0, devid = vring->vdev_id;
	struct mlxbf_tmfifo *fifo;
	bool more;

	fifo = vring->fifo;

	/* Return if vdev is not ready. */
	if (!fifo || !fifo->vdev[devid])
		return;

	/* Return if another vring is running. */
	if (fifo->vring[is_rx] && fifo->vring[is_rx] != vring)
		return;

	/* Only handle console and network for now. */
	if (WARN_ON(devid != VIRTIO_ID_NET && devid != VIRTIO_ID_CONSOLE))
		return;

	do {
retry:
		/* Get available FIFO space. */
		if (avail == 0) {
			if (is_rx)
				avail = mlxbf_tmfifo_get_rx_avail(fifo);
			else
				avail = mlxbf_tmfifo_get_tx_avail(fifo, devid);
			if (avail <= 0)
				break;
		}

		/* Insert paddings for discarded Tx packet. */
		if (!is_rx) {
			vring->tx_timeout = 0;
			while (vring->rem_padding >= sizeof(u64)) {
				writeq(0, vring->fifo->tx.data);
				vring->rem_padding -= sizeof(u64);
				if (--avail == 0)
					goto retry;
			}
		}

		/* Console output always comes from the Tx buffer. */
		if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
			mlxbf_tmfifo_console_tx(fifo, avail);
			break;
		}

		/* Handle one descriptor. */
		more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
	} while (more);

	/* Check Tx timeout. */
	if (avail <= 0 && !is_rx)
		mlxbf_tmfifo_check_tx_timeout(vring);
}
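/*
 * 'fifo->vring[is_rx]' acts as an ownership marker for a packet that is
 * only partially through the FIFO: it is set in mlxbf_tmfifo_rxtx_header()
 * when a transfer starts and cleared when the packet completes (or is
 * dropped/timed out). The "another vring is running" check above uses it
 * so a second vdev cannot interleave words into the middle of the stream.
 */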
/* Handle Rx or Tx queues. */
static void mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo *fifo, int queue_id,
				   int irq_id, bool is_rx)
{
	struct mlxbf_tmfifo_vdev *tm_vdev;
	struct mlxbf_tmfifo_vring *vring;
	int i;

	if (!test_and_clear_bit(irq_id, &fifo->pend_events) ||
	    !fifo->irq_info[irq_id].irq)
		return;

	for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++) {
		tm_vdev = fifo->vdev[i];
		if (tm_vdev) {
			vring = &tm_vdev->vrings[queue_id];
			if (vring->vq)
				mlxbf_tmfifo_rxtx(vring, is_rx);
		}
	}
}

/* Work handler for Rx and Tx case. */
static void mlxbf_tmfifo_work_handler(struct work_struct *work)
{
	struct mlxbf_tmfifo *fifo;

	fifo = container_of(work, struct mlxbf_tmfifo, work);
	if (!fifo->is_ready)
		return;

	mutex_lock(&fifo->lock);

	/* Tx (Send data to the TmFifo). */
	mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_TX,
			       MLXBF_TM_TX_LWM_IRQ, false);

	/* Rx (Receive data from the TmFifo). */
	mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_RX,
			       MLXBF_TM_RX_HWM_IRQ, true);

	mutex_unlock(&fifo->lock);
}

/* The notify function is called when new buffers are posted. */
static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
{
	struct mlxbf_tmfifo_vring *vring = vq->priv;
	struct mlxbf_tmfifo_vdev *tm_vdev;
	struct mlxbf_tmfifo *fifo;
	unsigned long flags;

	fifo = vring->fifo;

	/*
	 * Virtio maintains vrings in pairs, even-numbered rings for Rx
	 * and odd-numbered rings for Tx.
	 */
	if (vring->index & BIT(0)) {
		/*
		 * Console could make a blocking call with interrupts disabled.
		 * In such a case, the vring needs to be served right away. For
		 * other cases, just set the TX LWM bit to start Tx in the
		 * worker handler.
		 */
		if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
			spin_lock_irqsave(&fifo->spin_lock[0], flags);
			tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
			mlxbf_tmfifo_console_output(tm_vdev, vring);
			spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
			set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
		} else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
					    &fifo->pend_events)) {
			return true;
		}
	} else {
		if (test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events))
			return true;
	}

	schedule_work(&fifo->work);

	return true;
}

/* Get the array of feature bits for this device. */
static u64 mlxbf_tmfifo_virtio_get_features(struct virtio_device *vdev)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	return tm_vdev->features;
}

/* Confirm device features to use. */
static int mlxbf_tmfifo_virtio_finalize_features(struct virtio_device *vdev)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	tm_vdev->features = vdev->features;

	return 0;
}
/* Free virtqueues found by find_vqs(). */
static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
	struct mlxbf_tmfifo_vring *vring;
	struct virtqueue *vq;
	int i;

	for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
		vring = &tm_vdev->vrings[i];

		/* Release the pending packet. */
		if (vring->desc)
			mlxbf_tmfifo_release_pkt(vring);
		vq = vring->vq;
		if (vq) {
			vring->vq = NULL;
			vring_del_virtqueue(vq);
		}
	}
}

/* Create and initialize the virtual queues. */
static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
					unsigned int nvqs,
					struct virtqueue *vqs[],
					struct virtqueue_info vqs_info[],
					struct irq_affinity *desc)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
	struct mlxbf_tmfifo_vring *vring;
	struct virtqueue *vq;
	int i, ret, size;

	if (nvqs > ARRAY_SIZE(tm_vdev->vrings))
		return -EINVAL;

	for (i = 0; i < nvqs; ++i) {
		struct virtqueue_info *vqi = &vqs_info[i];

		if (!vqi->name) {
			ret = -EINVAL;
			goto error;
		}
		vring = &tm_vdev->vrings[i];

		/* zero vring */
		size = vring_size(vring->num, vring->align);
		memset(vring->va, 0, size);
		vq = vring_new_virtqueue(i, vring->num, vring->align, vdev,
					 false, false, vring->va,
					 mlxbf_tmfifo_virtio_notify,
					 vqi->callback, vqi->name);
		if (!vq) {
			dev_err(&vdev->dev, "vring_new_virtqueue failed\n");
			ret = -ENOMEM;
			goto error;
		}

		vq->num_max = vring->num;

		vq->priv = vring;

		/* Make vq update visible before using it. */
		virtio_mb(false);

		vqs[i] = vq;
		vring->vq = vq;
	}

	return 0;

error:
	mlxbf_tmfifo_virtio_del_vqs(vdev);
	return ret;
}

/* Read the status byte. */
static u8 mlxbf_tmfifo_virtio_get_status(struct virtio_device *vdev)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	return tm_vdev->status;
}

/* Write the status byte. */
static void mlxbf_tmfifo_virtio_set_status(struct virtio_device *vdev,
					   u8 status)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	tm_vdev->status = status;
}

/* Reset the device. Not much here for now. */
static void mlxbf_tmfifo_virtio_reset(struct virtio_device *vdev)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	tm_vdev->status = 0;
}

/* Read the value of a configuration field. */
static void mlxbf_tmfifo_virtio_get(struct virtio_device *vdev,
				    unsigned int offset,
				    void *buf,
				    unsigned int len)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	if ((u64)offset + len > sizeof(tm_vdev->config))
		return;

	memcpy(buf, (u8 *)&tm_vdev->config + offset, len);
}

/* Write the value of a configuration field. */
static void mlxbf_tmfifo_virtio_set(struct virtio_device *vdev,
				    unsigned int offset,
				    const void *buf,
				    unsigned int len)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	if ((u64)offset + len > sizeof(tm_vdev->config))
		return;

	memcpy((u8 *)&tm_vdev->config + offset, buf, len);
}

static void tmfifo_virtio_dev_release(struct device *device)
{
	struct virtio_device *vdev =
			container_of(device, struct virtio_device, dev);
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

	kfree(tm_vdev);
}
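/*
 * The release callback above is the only place 'tm_vdev' is freed: both
 * unregister_virtio_device() in mlxbf_tmfifo_delete_vdev() and the
 * put_device() on the error path of mlxbf_tmfifo_create_vdev() drop the
 * embedded struct device reference, and the final put ends up here.
 */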
/* Virtio config operations. */
static const struct virtio_config_ops mlxbf_tmfifo_virtio_config_ops = {
	.get_features = mlxbf_tmfifo_virtio_get_features,
	.finalize_features = mlxbf_tmfifo_virtio_finalize_features,
	.find_vqs = mlxbf_tmfifo_virtio_find_vqs,
	.del_vqs = mlxbf_tmfifo_virtio_del_vqs,
	.reset = mlxbf_tmfifo_virtio_reset,
	.set_status = mlxbf_tmfifo_virtio_set_status,
	.get_status = mlxbf_tmfifo_virtio_get_status,
	.get = mlxbf_tmfifo_virtio_get,
	.set = mlxbf_tmfifo_virtio_set,
};

/* Create vdev for the FIFO. */
static int mlxbf_tmfifo_create_vdev(struct device *dev,
				    struct mlxbf_tmfifo *fifo,
				    int vdev_id, u64 features,
				    void *config, u32 size)
{
	struct mlxbf_tmfifo_vdev *tm_vdev, *reg_dev = NULL;
	int ret;

	mutex_lock(&fifo->lock);

	tm_vdev = fifo->vdev[vdev_id];
	if (tm_vdev) {
		dev_err(dev, "vdev %d already exists\n", vdev_id);
		ret = -EEXIST;
		goto fail;
	}

	tm_vdev = kzalloc(sizeof(*tm_vdev), GFP_KERNEL);
	if (!tm_vdev) {
		ret = -ENOMEM;
		goto fail;
	}

	tm_vdev->vdev.id.device = vdev_id;
	tm_vdev->vdev.config = &mlxbf_tmfifo_virtio_config_ops;
	tm_vdev->vdev.dev.parent = dev;
	tm_vdev->vdev.dev.release = tmfifo_virtio_dev_release;
	tm_vdev->features = features;
	if (config)
		memcpy(&tm_vdev->config, config, size);

	if (mlxbf_tmfifo_alloc_vrings(fifo, tm_vdev)) {
		dev_err(dev, "unable to allocate vring\n");
		ret = -ENOMEM;
		goto vdev_fail;
	}

	/* Allocate an output buffer for the console device. */
	if (vdev_id == VIRTIO_ID_CONSOLE)
		tm_vdev->tx_buf.buf = devm_kmalloc(dev,
						   MLXBF_TMFIFO_CON_TX_BUF_SIZE,
						   GFP_KERNEL);
	fifo->vdev[vdev_id] = tm_vdev;

	/* Register the virtio device. */
	ret = register_virtio_device(&tm_vdev->vdev);
	reg_dev = tm_vdev;
	if (ret) {
		dev_err(dev, "register_virtio_device failed\n");
		goto vdev_fail;
	}

	mutex_unlock(&fifo->lock);
	return 0;

vdev_fail:
	mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
	fifo->vdev[vdev_id] = NULL;
	if (reg_dev)
		put_device(&tm_vdev->vdev.dev);
	else
		kfree(tm_vdev);
fail:
	mutex_unlock(&fifo->lock);
	return ret;
}

/* Delete vdev for the FIFO. */
static int mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo *fifo, int vdev_id)
{
	struct mlxbf_tmfifo_vdev *tm_vdev;

	mutex_lock(&fifo->lock);

	/* Unregister vdev. */
	tm_vdev = fifo->vdev[vdev_id];
	if (tm_vdev) {
		unregister_virtio_device(&tm_vdev->vdev);
		mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
		fifo->vdev[vdev_id] = NULL;
	}

	mutex_unlock(&fifo->lock);

	return 0;
}

/* Read the configured network MAC address from efi variable. */
static void mlxbf_tmfifo_get_cfg_mac(u8 *mac)
{
	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
	unsigned long size = ETH_ALEN;
	u8 buf[ETH_ALEN];
	efi_status_t rc;

	rc = efi.get_variable(mlxbf_tmfifo_efi_name, &guid, NULL, &size, buf);
	if (rc == EFI_SUCCESS && size == ETH_ALEN)
		ether_addr_copy(mac, buf);
	else
		ether_addr_copy(mac, mlxbf_tmfifo_net_default_mac);
}
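/*
 * Illustrative only: with efivarfs mounted, the variable read above would
 * typically appear under the global-variable GUID as something like
 * /sys/firmware/efi/efivars/RshimMacAddr-8be4df61-93ca-11d2-aa0d-00e098032b8c
 * (the exact path depends on the platform firmware); setting it lets the
 * firmware hand the driver a persistent MAC instead of the default above.
 */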
/* Set TmFifo thresholds which are used to trigger interrupts. */
static void mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo *fifo)
{
	u64 ctl;

	/* Get Tx FIFO size and set the low/high watermark. */
	ctl = readq(fifo->tx.ctl);
	fifo->tx_fifo_size =
		FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK, ctl);
	ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__LWM_MASK) |
		FIELD_PREP(MLXBF_TMFIFO_TX_CTL__LWM_MASK,
			   fifo->tx_fifo_size / 2);
	ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__HWM_MASK) |
		FIELD_PREP(MLXBF_TMFIFO_TX_CTL__HWM_MASK,
			   fifo->tx_fifo_size - 1);
	writeq(ctl, fifo->tx.ctl);

	/* Get Rx FIFO size and set the low/high watermark. */
	ctl = readq(fifo->rx.ctl);
	fifo->rx_fifo_size =
		FIELD_GET(MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK, ctl);
	ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__LWM_MASK) |
		FIELD_PREP(MLXBF_TMFIFO_RX_CTL__LWM_MASK, 0);
	ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__HWM_MASK) |
		FIELD_PREP(MLXBF_TMFIFO_RX_CTL__HWM_MASK, 1);
	writeq(ctl, fifo->rx.ctl);
}
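/*
 * Reading the watermark choices above: the Rx high watermark of 1 requests
 * an interrupt as soon as a single word arrives, and the Tx low watermark
 * of half the FIFO requests one once the FIFO drains below half full, so
 * the worker can top it up before it runs empty.
 */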
static void mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo *fifo)
{
	int i;

	fifo->is_ready = false;
	del_timer_sync(&fifo->timer);
	mlxbf_tmfifo_disable_irqs(fifo);
	cancel_work_sync(&fifo->work);
	for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++)
		mlxbf_tmfifo_delete_vdev(fifo, i);
}

/* Probe the TMFIFO. */
static int mlxbf_tmfifo_probe(struct platform_device *pdev)
{
	struct virtio_net_config net_config;
	struct device *dev = &pdev->dev;
	struct mlxbf_tmfifo *fifo;
	u64 dev_id;
	int i, rc;

	rc = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &dev_id);
	if (rc) {
		dev_err(dev, "Cannot retrieve UID\n");
		return rc;
	}

	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;

	spin_lock_init(&fifo->spin_lock[0]);
	spin_lock_init(&fifo->spin_lock[1]);
	INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
	mutex_init(&fifo->lock);

	/* Get the resource of the Rx FIFO. */
	fifo->res0 = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(fifo->res0))
		return PTR_ERR(fifo->res0);

	/* Get the resource of the Tx FIFO. */
	fifo->res1 = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(fifo->res1))
		return PTR_ERR(fifo->res1);

	if (dev_id == TMFIFO_BF3_UID) {
		fifo->rx.ctl = fifo->res1 + MLXBF_TMFIFO_RX_CTL_BF3;
		fifo->rx.sts = fifo->res1 + MLXBF_TMFIFO_RX_STS_BF3;
		fifo->rx.data = fifo->res0 + MLXBF_TMFIFO_RX_DATA_BF3;
		fifo->tx.ctl = fifo->res1 + MLXBF_TMFIFO_TX_CTL_BF3;
		fifo->tx.sts = fifo->res1 + MLXBF_TMFIFO_TX_STS_BF3;
		fifo->tx.data = fifo->res0 + MLXBF_TMFIFO_TX_DATA_BF3;
	} else {
		fifo->rx.ctl = fifo->res0 + MLXBF_TMFIFO_RX_CTL;
		fifo->rx.sts = fifo->res0 + MLXBF_TMFIFO_RX_STS;
		fifo->rx.data = fifo->res0 + MLXBF_TMFIFO_RX_DATA;
		fifo->tx.ctl = fifo->res1 + MLXBF_TMFIFO_TX_CTL;
		fifo->tx.sts = fifo->res1 + MLXBF_TMFIFO_TX_STS;
		fifo->tx.data = fifo->res1 + MLXBF_TMFIFO_TX_DATA;
	}

	platform_set_drvdata(pdev, fifo);

	timer_setup(&fifo->timer, mlxbf_tmfifo_timer, 0);

	for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
		fifo->irq_info[i].index = i;
		fifo->irq_info[i].fifo = fifo;
		fifo->irq_info[i].irq = platform_get_irq(pdev, i);
		rc = devm_request_irq(dev, fifo->irq_info[i].irq,
				      mlxbf_tmfifo_irq_handler, 0,
				      "tmfifo", &fifo->irq_info[i]);
		if (rc) {
			dev_err(dev, "devm_request_irq failed\n");
			fifo->irq_info[i].irq = 0;
			return rc;
		}
	}

	mlxbf_tmfifo_set_threshold(fifo);

	/* Create the console vdev. */
	rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_CONSOLE, 0, NULL, 0);
	if (rc)
		goto fail;

	/* Create the network vdev. */
	memset(&net_config, 0, sizeof(net_config));

	/* A legacy-only interface for now. */
	net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
					   ETH_DATA_LEN);
	net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
					      VIRTIO_NET_S_LINK_UP);
	mlxbf_tmfifo_get_cfg_mac(net_config.mac);
	rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
				      MLXBF_TMFIFO_NET_FEATURES, &net_config,
				      sizeof(net_config));
	if (rc)
		goto fail;

	mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);

	/* Make all updates visible before setting the 'is_ready' flag. */
	virtio_mb(false);

	fifo->is_ready = true;
	return 0;

fail:
	mlxbf_tmfifo_cleanup(fifo);
	return rc;
}

/* Device remove function. */
static void mlxbf_tmfifo_remove(struct platform_device *pdev)
{
	struct mlxbf_tmfifo *fifo = platform_get_drvdata(pdev);

	mlxbf_tmfifo_cleanup(fifo);
}

static const struct acpi_device_id mlxbf_tmfifo_acpi_match[] = {
	{ "MLNXBF01", 0 },
	{}
};
MODULE_DEVICE_TABLE(acpi, mlxbf_tmfifo_acpi_match);

static struct platform_driver mlxbf_tmfifo_driver = {
	.probe = mlxbf_tmfifo_probe,
	.remove_new = mlxbf_tmfifo_remove,
	.driver = {
		.name = "bf-tmfifo",
		.acpi_match_table = mlxbf_tmfifo_acpi_match,
	},
};

module_platform_driver(mlxbf_tmfifo_driver);

MODULE_DESCRIPTION("Mellanox BlueField SoC TmFifo Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mellanox Technologies");