Lines matching "ring" in the Broadcom FlexRM mailbox driver (drivers/mailbox/bcm-flexrm-mailbox.c)
8 * extension to Broadcom FlexRM ring manager. The FlexRM ring
13 * rings where each mailbox channel represents a separate FlexRM ring.
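The matched header lines above come from the driver's file comment: the Broadcom FlexSparx4 offload engines sit behind the FlexRM ring manager, and this driver exposes each FlexRM ring as one mailbox channel. A consumer therefore never touches the ring registers directly; it goes through the generic mailbox client API. A minimal, hypothetical client-side sketch (example_get_ring/example_submit are illustrative names, and the struct brcm_message contents are defined in include/linux/mailbox/brcm-message.h, not in this listing):

#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>

static struct mbox_chan *example_get_ring(struct mbox_client *cl)
{
	/* index 0 selects the first ring listed in the client's "mboxes" property */
	return mbox_request_channel(cl, 0);
}

static int example_submit(struct mbox_chan *chan, struct brcm_message *msg)
{
	/* this lands in the driver's flexrm_send_data() shown further down */
	int ret = mbox_send_message(chan, msg);

	return ret < 0 ? ret : 0;
}

The client's rx_callback receives the same brcm_message back once flexrm_process_completions() hands it to the mailbox core.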
66 /* Per-Ring register offsets */
128 /* ====== FlexRM ring descriptor defines ===== */
292 /* ====== FlexRM ring descriptor helper routines ===== */
924 struct flexrm_ring *ring; in flexrm_write_config_in_seqfile() local
927 "Ring#", "State", "BD_Addr", "BD_Size", in flexrm_write_config_in_seqfile()
931 ring = &mbox->rings[i]; in flexrm_write_config_in_seqfile()
932 if (readl(ring->regs + RING_CONTROL) & in flexrm_write_config_in_seqfile()
939 ring->num, state, in flexrm_write_config_in_seqfile()
940 (unsigned long long)ring->bd_dma_base, in flexrm_write_config_in_seqfile()
942 (unsigned long long)ring->cmpl_dma_base, in flexrm_write_config_in_seqfile()
952 struct flexrm_ring *ring; in flexrm_write_stats_in_seqfile() local
955 "Ring#", "BD_Read", "BD_Write", in flexrm_write_stats_in_seqfile()
959 ring = &mbox->rings[i]; in flexrm_write_stats_in_seqfile()
960 bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR); in flexrm_write_stats_in_seqfile()
961 val = readl_relaxed(ring->regs + RING_BD_START_ADDR); in flexrm_write_stats_in_seqfile()
964 ring->bd_dma_base); in flexrm_write_stats_in_seqfile()
966 ring->num, in flexrm_write_stats_in_seqfile()
968 (u32)ring->bd_write_offset, in flexrm_write_stats_in_seqfile()
969 (u32)ring->cmpl_read_offset, in flexrm_write_stats_in_seqfile()
970 (u32)atomic_read(&ring->msg_send_count), in flexrm_write_stats_in_seqfile()
971 (u32)atomic_read(&ring->msg_cmpl_count)); in flexrm_write_stats_in_seqfile()
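These two seq_file helpers back the driver's debugfs "config" and "stats" dumps. The stats path converts the hardware BD read pointer, which counts descriptors relative to the programmed start address, into a byte offset from the ring's DMA base. A condensed reconstruction of that arithmetic; RING_DESC_SIZE and BD_START_ADDR_DECODE() are driver-private definitions that do not appear among the matched lines:

u32 val, bd_read_offset;

bd_read_offset  = readl_relaxed(ring->regs + RING_BD_READ_PTR);
val             = readl_relaxed(ring->regs + RING_BD_START_ADDR);
bd_read_offset *= RING_DESC_SIZE;	/* pointer is in descriptor units */
bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);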
975 static int flexrm_new_request(struct flexrm_ring *ring, in flexrm_new_request() argument
992 spin_lock_irqsave(&ring->lock, flags); in flexrm_new_request()
993 reqid = bitmap_find_free_region(ring->requests_bmap, in flexrm_new_request()
995 spin_unlock_irqrestore(&ring->lock, flags); in flexrm_new_request()
998 ring->requests[reqid] = msg; in flexrm_new_request()
1001 ret = flexrm_dma_map(ring->mbox->dev, msg); in flexrm_new_request()
1003 ring->requests[reqid] = NULL; in flexrm_new_request()
1004 spin_lock_irqsave(&ring->lock, flags); in flexrm_new_request()
1005 bitmap_release_region(ring->requests_bmap, reqid, 0); in flexrm_new_request()
1006 spin_unlock_irqrestore(&ring->lock, flags); in flexrm_new_request()
1011 read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR); in flexrm_new_request()
1012 val = readl_relaxed(ring->regs + RING_BD_START_ADDR); in flexrm_new_request()
1014 read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base); in flexrm_new_request()
1025 write_offset = ring->bd_write_offset; in flexrm_new_request()
1027 if (!flexrm_is_next_table_desc(ring->bd_base + write_offset)) in flexrm_new_request()
1041 /* Write descriptors to ring */ in flexrm_new_request()
1043 ring->bd_base + ring->bd_write_offset, in flexrm_new_request()
1044 RING_BD_TOGGLE_VALID(ring->bd_write_offset), in flexrm_new_request()
1045 ring->bd_base, ring->bd_base + RING_BD_SIZE); in flexrm_new_request()
1052 /* Save ring BD write offset */ in flexrm_new_request()
1053 ring->bd_write_offset = (unsigned long)(next - ring->bd_base); in flexrm_new_request()
1056 atomic_inc_return(&ring->msg_send_count); in flexrm_new_request()
1064 flexrm_dma_unmap(ring->mbox->dev, msg); in flexrm_new_request()
1065 ring->requests[reqid] = NULL; in flexrm_new_request()
1066 spin_lock_irqsave(&ring->lock, flags); in flexrm_new_request()
1067 bitmap_release_region(ring->requests_bmap, reqid, 0); in flexrm_new_request()
1068 spin_unlock_irqrestore(&ring->lock, flags); in flexrm_new_request()
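flexrm_new_request() reserves a per-ring request id from requests_bmap under ring->lock, DMA-maps the message, writes the descriptors, and releases the id again on any failure. The id management is the standard order-0 bitmap-region pattern; a self-contained sketch with hypothetical helper names:

#include <linux/bitmap.h>
#include <linux/spinlock.h>

static int example_reqid_alloc(struct flexrm_ring *ring)
{
	unsigned long flags;
	int reqid;

	spin_lock_irqsave(&ring->lock, flags);
	reqid = bitmap_find_free_region(ring->requests_bmap,
					RING_MAX_REQ_COUNT, 0);
	spin_unlock_irqrestore(&ring->lock, flags);

	/* negative when RING_MAX_REQ_COUNT requests are already in flight */
	return reqid;
}

static void example_reqid_free(struct flexrm_ring *ring, int reqid)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	bitmap_release_region(ring->requests_bmap, reqid, 0);
	spin_unlock_irqrestore(&ring->lock, flags);
}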
1074 static int flexrm_process_completions(struct flexrm_ring *ring) in flexrm_process_completions() argument
1081 struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num]; in flexrm_process_completions()
1083 spin_lock_irqsave(&ring->lock, flags); in flexrm_process_completions()
1093 cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR); in flexrm_process_completions()
1095 cmpl_read_offset = ring->cmpl_read_offset; in flexrm_process_completions()
1096 ring->cmpl_read_offset = cmpl_write_offset; in flexrm_process_completions()
1098 spin_unlock_irqrestore(&ring->lock, flags); in flexrm_process_completions()
1104 desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset)); in flexrm_process_completions()
1114 dev_warn(ring->mbox->dev, in flexrm_process_completions()
1115 "ring%d got completion desc=0x%lx with error %d\n", in flexrm_process_completions()
1116 ring->num, (unsigned long)desc, err); in flexrm_process_completions()
1123 msg = ring->requests[reqid]; in flexrm_process_completions()
1125 dev_warn(ring->mbox->dev, in flexrm_process_completions()
1126 "ring%d null msg pointer for completion desc=0x%lx\n", in flexrm_process_completions()
1127 ring->num, (unsigned long)desc); in flexrm_process_completions()
1132 ring->requests[reqid] = NULL; in flexrm_process_completions()
1133 spin_lock_irqsave(&ring->lock, flags); in flexrm_process_completions()
1134 bitmap_release_region(ring->requests_bmap, reqid, 0); in flexrm_process_completions()
1135 spin_unlock_irqrestore(&ring->lock, flags); in flexrm_process_completions()
1138 flexrm_dma_unmap(ring->mbox->dev, msg); in flexrm_process_completions()
1145 atomic_inc_return(&ring->msg_cmpl_count); in flexrm_process_completions()
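flexrm_process_completions() snapshots the hardware completion write pointer under the lock, then walks descriptors from the saved read offset up to it, handing each finished message back through mbox_chan_received_data(). A sketch of that walk; RING_CMPL_SIZE is a driver-private constant (the size of the completion area), and the reqid/error decoding step is elided, which is where msg and err come from:

while (cmpl_read_offset != cmpl_write_offset) {
	desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));

	cmpl_read_offset += RING_DESC_SIZE;
	if (cmpl_read_offset == RING_CMPL_SIZE)
		cmpl_read_offset = 0;	/* wrap around the completion area */

	/*
	 * ... decode reqid and error from desc, look up ring->requests[reqid],
	 * release the reqid, unmap DMA, then notify the mailbox client ...
	 */
	msg->error = err;
	mbox_chan_received_data(chan, msg);
}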
1179 /* Ring related errors will be informed via completion descriptors */ in flexrm_irq_event()
1196 struct flexrm_ring *ring = chan->con_priv; in flexrm_send_data() local
1202 rc = flexrm_new_request(ring, msg, in flexrm_send_data()
1213 return flexrm_new_request(ring, NULL, data); in flexrm_send_data()
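flexrm_send_data() is the controller's send_data hook, and the two matched call sites correspond to its two paths: a BRCM_MESSAGE_BATCH message is unrolled into one request per batch entry, while any other message type is queued as a single request. An approximate reconstruction (not verbatim; the batch field names follow include/linux/mailbox/brcm-message.h):

static int example_send_data(struct mbox_chan *chan, void *data)
{
	struct flexrm_ring *ring = chan->con_priv;
	struct brcm_message *msg = data;
	int i, rc;

	if (msg->type == BRCM_MESSAGE_BATCH) {
		for (i = msg->batch.msgs_queued; i < msg->batch.msgs_count; i++) {
			rc = flexrm_new_request(ring, msg, &msg->batch.msgs[i]);
			if (rc) {
				msg->error = rc;
				return rc;
			}
			msg->batch.msgs_queued++;
		}
		return 0;
	}

	return flexrm_new_request(ring, NULL, data);
}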
1229 struct flexrm_ring *ring = chan->con_priv; in flexrm_startup() local
1232 ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool, in flexrm_startup()
1233 GFP_KERNEL, &ring->bd_dma_base); in flexrm_startup()
1234 if (!ring->bd_base) { in flexrm_startup()
1235 dev_err(ring->mbox->dev, in flexrm_startup()
1236 "can't allocate BD memory for ring%d\n", in flexrm_startup()
1237 ring->num); in flexrm_startup()
1247 next_addr += ring->bd_dma_base; in flexrm_startup()
1253 flexrm_write_desc(ring->bd_base + off, d); in flexrm_startup()
1257 ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool, in flexrm_startup()
1258 GFP_KERNEL, &ring->cmpl_dma_base); in flexrm_startup()
1259 if (!ring->cmpl_base) { in flexrm_startup()
1260 dev_err(ring->mbox->dev, in flexrm_startup()
1261 "can't allocate completion memory for ring%d\n", in flexrm_startup()
1262 ring->num); in flexrm_startup()
1268 if (ring->irq == UINT_MAX) { in flexrm_startup()
1269 dev_err(ring->mbox->dev, in flexrm_startup()
1270 "ring%d IRQ not available\n", ring->num); in flexrm_startup()
1274 ret = request_threaded_irq(ring->irq, in flexrm_startup()
1277 0, dev_name(ring->mbox->dev), ring); in flexrm_startup()
1279 dev_err(ring->mbox->dev, in flexrm_startup()
1280 "failed to request ring%d IRQ\n", ring->num); in flexrm_startup()
1283 ring->irq_requested = true; in flexrm_startup()
1286 ring->irq_aff_hint = CPU_MASK_NONE; in flexrm_startup()
1287 val = ring->mbox->num_rings; in flexrm_startup()
1289 cpumask_set_cpu((ring->num / val) % num_online_cpus(), in flexrm_startup()
1290 &ring->irq_aff_hint); in flexrm_startup()
1291 ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint); in flexrm_startup()
1293 dev_err(ring->mbox->dev, in flexrm_startup()
1294 "failed to set IRQ affinity hint for ring%d\n", in flexrm_startup()
1295 ring->num); in flexrm_startup()
1299 /* Disable/inactivate ring */ in flexrm_startup()
1300 writel_relaxed(0x0, ring->regs + RING_CONTROL); in flexrm_startup()
1303 val = BD_START_ADDR_VALUE(ring->bd_dma_base); in flexrm_startup()
1304 writel_relaxed(val, ring->regs + RING_BD_START_ADDR); in flexrm_startup()
1307 ring->bd_write_offset = in flexrm_startup()
1308 readl_relaxed(ring->regs + RING_BD_WRITE_PTR); in flexrm_startup()
1309 ring->bd_write_offset *= RING_DESC_SIZE; in flexrm_startup()
1312 val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base); in flexrm_startup()
1313 writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR); in flexrm_startup()
1316 ring->cmpl_read_offset = in flexrm_startup()
1317 readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR); in flexrm_startup()
1318 ring->cmpl_read_offset *= RING_DESC_SIZE; in flexrm_startup()
1320 /* Read ring Tx, Rx, and Outstanding counts to clear */ in flexrm_startup()
1321 readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS); in flexrm_startup()
1322 readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS); in flexrm_startup()
1323 readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS); in flexrm_startup()
1324 readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS); in flexrm_startup()
1325 readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND); in flexrm_startup()
1329 val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT); in flexrm_startup()
1331 val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT; in flexrm_startup()
1332 writel_relaxed(val, ring->regs + RING_MSI_CONTROL); in flexrm_startup()
1334 /* Enable/activate ring */ in flexrm_startup()
1336 writel_relaxed(val, ring->regs + RING_CONTROL); in flexrm_startup()
1339 atomic_set(&ring->msg_send_count, 0); in flexrm_startup()
1340 atomic_set(&ring->msg_cmpl_count, 0); in flexrm_startup()
1345 free_irq(ring->irq, ring); in flexrm_startup()
1346 ring->irq_requested = false; in flexrm_startup()
1348 dma_pool_free(ring->mbox->cmpl_pool, in flexrm_startup()
1349 ring->cmpl_base, ring->cmpl_dma_base); in flexrm_startup()
1350 ring->cmpl_base = NULL; in flexrm_startup()
1352 dma_pool_free(ring->mbox->bd_pool, in flexrm_startup()
1353 ring->bd_base, ring->bd_dma_base); in flexrm_startup()
1354 ring->bd_base = NULL; in flexrm_startup()
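flexrm_startup() allocates the BD and completion areas from the mailbox-wide DMA pools, requests the per-ring threaded IRQ with an affinity hint, and only then programs the ring registers. The ordering of that register sequence is the important part; condensed below (the MSI and control bit-field macros are driver-private and not among the matched lines):

writel_relaxed(0x0, ring->regs + RING_CONTROL);		/* 1. quiesce the ring */

/* 2. program the BD area and resync the software write offset */
writel_relaxed(BD_START_ADDR_VALUE(ring->bd_dma_base),
	       ring->regs + RING_BD_START_ADDR);
ring->bd_write_offset =
	readl_relaxed(ring->regs + RING_BD_WRITE_PTR) * RING_DESC_SIZE;

/* 3. program the completion area and resync the software read offset */
writel_relaxed(CMPL_START_ADDR_VALUE(ring->cmpl_dma_base),
	       ring->regs + RING_CMPL_START_ADDR);
ring->cmpl_read_offset =
	readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR) * RING_DESC_SIZE;

/* 4. the request counters are clear-on-read, so read each one once */
/* 5. program RING_MSI_CONTROL (coalescing timer, enable, count threshold) */
/* 6. re-enable the ring through RING_CONTROL */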
1364 struct flexrm_ring *ring = chan->con_priv; in flexrm_shutdown() local
1366 /* Disable/inactivate ring */ in flexrm_shutdown()
1367 writel_relaxed(0x0, ring->regs + RING_CONTROL); in flexrm_shutdown()
1369 /* Set ring flush state */ in flexrm_shutdown()
1372 ring->regs + RING_CONTROL); in flexrm_shutdown()
1374 if (readl_relaxed(ring->regs + RING_FLUSH_DONE) & in flexrm_shutdown()
1380 dev_err(ring->mbox->dev, in flexrm_shutdown()
1381 "setting ring%d flush state timedout\n", ring->num); in flexrm_shutdown()
1383 /* Clear ring flush state */ in flexrm_shutdown()
1385 writel_relaxed(0x0, ring->regs + RING_CONTROL); in flexrm_shutdown()
1387 if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & in flexrm_shutdown()
1393 dev_err(ring->mbox->dev, in flexrm_shutdown()
1394 "clearing ring%d flush state timedout\n", ring->num); in flexrm_shutdown()
1398 msg = ring->requests[reqid]; in flexrm_shutdown()
1403 ring->requests[reqid] = NULL; in flexrm_shutdown()
1406 flexrm_dma_unmap(ring->mbox->dev, msg); in flexrm_shutdown()
1414 bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT); in flexrm_shutdown()
1417 if (ring->irq_requested) { in flexrm_shutdown()
1418 irq_update_affinity_hint(ring->irq, NULL); in flexrm_shutdown()
1419 free_irq(ring->irq, ring); in flexrm_shutdown()
1420 ring->irq_requested = false; in flexrm_shutdown()
1423 /* Free-up completion descriptor ring */ in flexrm_shutdown()
1424 if (ring->cmpl_base) { in flexrm_shutdown()
1425 dma_pool_free(ring->mbox->cmpl_pool, in flexrm_shutdown()
1426 ring->cmpl_base, ring->cmpl_dma_base); in flexrm_shutdown()
1427 ring->cmpl_base = NULL; in flexrm_shutdown()
1430 /* Free-up BD descriptor ring */ in flexrm_shutdown()
1431 if (ring->bd_base) { in flexrm_shutdown()
1432 dma_pool_free(ring->mbox->bd_pool, in flexrm_shutdown()
1433 ring->bd_base, ring->bd_dma_base); in flexrm_shutdown()
1434 ring->bd_base = NULL; in flexrm_shutdown()
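flexrm_shutdown() quiesces the ring with a two-phase flush handshake, then completes every in-flight request back to its client as failed and releases the IRQ and DMA areas. A sketch of the handshake; CONTROL_FLUSH_SHIFT and FLUSH_DONE_MASK are driver-private, and the real code bounds each phase with roughly a one-second, millisecond-granularity timeout:

unsigned int timeout = 1000;	/* ~1s per phase */

/* phase 1: request a flush and wait for FLUSH_DONE to assert */
writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), ring->regs + RING_CONTROL);
while (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & FLUSH_DONE_MASK) &&
       --timeout)
	mdelay(1);

/* phase 2: drop the flush request and wait for FLUSH_DONE to de-assert */
timeout = 1000;
writel_relaxed(0x0, ring->regs + RING_CONTROL);
while ((readl_relaxed(ring->regs + RING_FLUSH_DONE) & FLUSH_DONE_MASK) &&
       --timeout)
	mdelay(1);

Only after both phases does the code walk ring->requests[], unmap each pending message, and clear the request bitmap, as the matched lines above show.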
1449 struct flexrm_ring *ring; in flexrm_mbox_of_xlate() local
1464 ring = chan->con_priv; in flexrm_mbox_of_xlate()
1465 ring->msi_count_threshold = pa->args[1]; in flexrm_mbox_of_xlate()
1466 ring->msi_timer_val = pa->args[2]; in flexrm_mbox_of_xlate()
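flexrm_mbox_of_xlate() decodes a three-cell mbox specifier: cell 0 selects the ring, cell 1 overrides that ring's MSI count threshold, and cell 2 its MSI coalescing timer, replacing the defaults set in probe. An illustrative consumer binding, written as a C comment to match the surrounding listing, with placeholder node name and values:

/*
 *	offload-engine@0 {
 *		...
 *		mboxes = <&flexrm 0 0x1 0xffff>;   // ring 0, count threshold 1, max timer
 *	};
 */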
1477 struct flexrm_ring *ring = &mbox->rings[desc->msi_index]; in flexrm_mbox_msi_write() local
1479 /* Configure per-Ring MSI registers */ in flexrm_mbox_msi_write()
1480 writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS); in flexrm_mbox_msi_write()
1481 writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS); in flexrm_mbox_msi_write()
1482 writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE); in flexrm_mbox_msi_write()
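Each ring owns one platform MSI; this write hook mirrors the MSI address/data composed by the interrupt controller into the ring's own registers so the ring engine can raise that ring's interrupt on its own. A reconstructed approximation of the callback (the function name here is illustrative):

static void example_msi_write(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct flexrm_mbox *mbox = dev_get_drvdata(dev);
	struct flexrm_ring *ring = &mbox->rings[desc->msi_index];

	writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
	writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
	writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
}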
1491 struct flexrm_ring *ring; in flexrm_mbox_probe() local
1526 /* Allocate driver ring structs */ in flexrm_mbox_probe()
1527 ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL); in flexrm_mbox_probe()
1528 if (!ring) { in flexrm_mbox_probe()
1532 mbox->rings = ring; in flexrm_mbox_probe()
1534 /* Initialize members of driver ring structs */ in flexrm_mbox_probe()
1537 ring = &mbox->rings[index]; in flexrm_mbox_probe()
1538 ring->num = index; in flexrm_mbox_probe()
1539 ring->mbox = mbox; in flexrm_mbox_probe()
1547 ring->regs = regs; in flexrm_mbox_probe()
1549 ring->irq = UINT_MAX; in flexrm_mbox_probe()
1550 ring->irq_requested = false; in flexrm_mbox_probe()
1551 ring->msi_timer_val = MSI_TIMER_VAL_MASK; in flexrm_mbox_probe()
1552 ring->msi_count_threshold = 0x1; in flexrm_mbox_probe()
1553 memset(ring->requests, 0, sizeof(ring->requests)); in flexrm_mbox_probe()
1554 ring->bd_base = NULL; in flexrm_mbox_probe()
1555 ring->bd_dma_base = 0; in flexrm_mbox_probe()
1556 ring->cmpl_base = NULL; in flexrm_mbox_probe()
1557 ring->cmpl_dma_base = 0; in flexrm_mbox_probe()
1558 atomic_set(&ring->msg_send_count, 0); in flexrm_mbox_probe()
1559 atomic_set(&ring->msg_cmpl_count, 0); in flexrm_mbox_probe()
1560 spin_lock_init(&ring->lock); in flexrm_mbox_probe()
1561 bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT); in flexrm_mbox_probe()
1562 ring->cmpl_read_offset = 0; in flexrm_mbox_probe()
1573 /* Create DMA pool for ring BD memory */ in flexrm_mbox_probe()
1581 /* Create DMA pool for ring completion memory */ in flexrm_mbox_probe()
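The two pools referenced above hold per-ring BD and completion memory with the alignment the hardware expects, so flexrm_startup() can grab each ring's areas with a single dma_pool_alloc()/dma_pool_zalloc() call. A sketch, assuming hypothetical alignment-order constants and pool names (the real macro names and values live in the driver's define block, not in the matched lines):

/* EXAMPLE_BD_ALIGN_ORDER / EXAMPLE_CMPL_ALIGN_ORDER are assumed names */
mbox->bd_pool = dma_pool_create("flexrm_bd", dev, RING_BD_SIZE,
				1 << EXAMPLE_BD_ALIGN_ORDER, 0);
if (!mbox->bd_pool)
	return -ENOMEM;

mbox->cmpl_pool = dma_pool_create("flexrm_cmpl", dev, RING_CMPL_SIZE,
				  1 << EXAMPLE_CMPL_ALIGN_ORDER, 0);
if (!mbox->cmpl_pool) {
	dma_pool_destroy(mbox->bd_pool);
	return -ENOMEM;
}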
1589 /* Allocate platform MSIs for each ring */ in flexrm_mbox_probe()
1595 /* Save alloced IRQ numbers for each ring */ in flexrm_mbox_probe()
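Finally, probe allocates one platform MSI per ring with flexrm_mbox_msi_write() as the write callback and records the resulting Linux IRQ number in each ring->irq, which flexrm_startup() later hands to request_threaded_irq(). The exact allocation API has changed across kernel versions; on a recent kernel the shape is roughly as follows (sketch only, consult the driver for your kernel; error unwinding of the DMA pools is elided):

ret = platform_device_msi_init_and_alloc_irqs(dev, mbox->num_rings,
					      flexrm_mbox_msi_write);
if (ret)
	return ret;	/* real code tears down the DMA pools here */

for (index = 0; index < mbox->num_rings; index++)
	mbox->rings[index].irq = msi_get_virq(dev, index);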