Lines Matching defs:dev_entry
107 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
111 spin_lock(&dev_entry->cb_spinlock);
113 list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
127 spin_unlock(&dev_entry->cb_spinlock);
138 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
142 spin_lock(&dev_entry->cs_spinlock);
144 list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
158 spin_unlock(&dev_entry->cs_spinlock);
169 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
173 spin_lock(&dev_entry->cs_job_spinlock);
175 list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
195 spin_unlock(&dev_entry->cs_job_spinlock);
206 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
212 spin_lock(&dev_entry->userptr_spinlock);
214 list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
226 spin_unlock(&dev_entry->userptr_spinlock);
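
The hits at lines 107-226 all follow one pattern: a seq_file show handler walks a driver-private list under that list's spinlock and prints one line per element. Below is a minimal sketch of that pattern with hypothetical names (struct my_obj, struct my_dbg_entry, my_objs_show); it assumes the handler was wired up with single_open() so the seq_file's private pointer is the entry structure. It is an illustration of the locking/iteration shape, not the driver's code.

/*
 * Minimal sketch: walk a spinlock-protected list from a debugfs
 * show handler. Hypothetical types and names.
 */
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_obj {
        struct list_head debugfs_list;
        u64 id;
};

struct my_dbg_entry {
        struct list_head obj_list;
        spinlock_t obj_spinlock;
};

static int my_objs_show(struct seq_file *s, void *data)
{
        struct my_dbg_entry *dev_entry = s->private;
        struct my_obj *obj;

        spin_lock(&dev_entry->obj_spinlock);

        /* Holders of obj_spinlock must not sleep; writing into the
         * preallocated seq_file buffer is fine, blocking calls are not. */
        list_for_each_entry(obj, &dev_entry->obj_list, debugfs_list)
                seq_printf(s, "object id %llu\n", obj->id);

        spin_unlock(&dev_entry->obj_spinlock);

        return 0;
}
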
237 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
251 mutex_lock(&dev_entry->ctx_mem_hash_mutex);
253 list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
322 mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
324 ctx = hl_get_compute_ctx(dev_entry->hdev);
352 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
360 spin_lock(&dev_entry->userptr_spinlock);
362 list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
363 if (dev_entry->userptr_lookup >= userptr->addr &&
364 dev_entry->userptr_lookup < userptr->addr + userptr->size) {
373 if (dev_entry->userptr_lookup >= sg_start &&
374 dev_entry->userptr_lookup < sg_end) {
375 dma_addr += (dev_entry->userptr_lookup -
384 dev_entry->userptr_lookup,
393 spin_unlock(&dev_entry->userptr_spinlock);
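
The userptr_lookup hits at lines 352-393 search the userptr list for the pinned region that contains a looked-up user virtual address and then walk its DMA-mapped scatterlist to report the matching DMA address. A hedged sketch of that translation step, assuming the scatterlist covers the pinned region contiguously in VA order; struct my_userptr and lookup_dma_addr() are illustrative names only, not the driver's.

/*
 * Sketch: map an offset inside a pinned user buffer to a DMA address
 * by walking the DMA-mapped scatterlist segment by segment.
 */
#include <linux/scatterlist.h>
#include <linux/types.h>

struct my_userptr {
        u64 addr;               /* user VA of the pinned region */
        u64 size;
        struct sg_table *sgt;   /* DMA-mapped scatterlist */
};

static dma_addr_t lookup_dma_addr(struct my_userptr *userptr, u64 lookup_va)
{
        u64 sg_start = userptr->addr;
        struct scatterlist *sg;
        int i;

        for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
                u64 sg_end = sg_start + sg_dma_len(sg);

                /* The looked-up VA falls inside this segment: add its
                 * offset from the segment start to the DMA base address. */
                if (lookup_va >= sg_start && lookup_va < sg_end)
                        return sg_dma_address(sg) + (lookup_va - sg_start);

                sg_start = sg_end;
        }

        return 0; /* not covered by this userptr's mapping */
}
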
406 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
414 dev_entry->userptr_lookup = value;
422 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
423 struct hl_device *hdev = dev_entry->hdev;
426 u64 virt_addr = dev_entry->mmu_addr, phys_addr;
429 if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
448 (dev_entry->mmu_addr != hops_info.scrambled_vaddr))
451 dev_entry->mmu_asid, dev_entry->mmu_addr,
457 dev_entry->mmu_asid, dev_entry->mmu_addr, phys_addr);
469 if (dev_entry->mmu_asid != HL_KERNEL_ASID_ID)
480 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
481 struct hl_device *hdev = dev_entry->hdev;
497 rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
503 rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
518 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
519 struct hl_device *hdev = dev_entry->hdev;
522 if (!dev_entry->mmu_cap_mask) {
527 rc = hdev->asic_funcs->ack_mmu_errors(hdev, dev_entry->mmu_cap_mask);
542 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
543 struct hl_device *hdev = dev_entry->hdev;
558 rc = kstrtoull(kbuf, 16, &dev_entry->mmu_cap_mask);
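
The write handlers behind the hits at lines 480-558 follow the usual debugfs recipe: copy the user's string into a small kernel buffer, NUL-terminate it, and parse it with kstrtouint()/kstrtoull() into the entry's fields (mmu_asid, mmu_addr, mmu_cap_mask). A minimal sketch of that recipe, assuming the target u64 was passed to debugfs_create_file() as the private data; my_mask_write() is a made-up name and the real handlers parse more fields.

/*
 * Sketch: debugfs write handler that parses one hex value from
 * user space, e.g. "echo 0x3 > <debugfs>/my_mask".
 */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t my_mask_write(struct file *f, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        u64 *mask = file_inode(f)->i_private; /* set via debugfs_create_file() */
        char kbuf[32];
        int rc;

        if (count >= sizeof(kbuf))
                return -EINVAL;

        if (copy_from_user(kbuf, buf, count))
                return -EFAULT;
        kbuf[count] = '\0';

        /* Base 16: the value is written as a hex mask. */
        rc = kstrtoull(kbuf, 16, mask);
        if (rc)
                return rc;

        return count;
}
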
572 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
573 struct hl_device *hdev = dev_entry->hdev;
1558 static void add_secured_nodes(struct hl_dbg_device_entry *dev_entry, struct dentry *root)
1563 &dev_entry->i2c_bus);
1568 &dev_entry->i2c_addr);
1573 &dev_entry->i2c_reg);
1578 &dev_entry->i2c_len);
1583 dev_entry,
1589 dev_entry,
1595 dev_entry,
1601 dev_entry,
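
The add_secured_nodes() hits at lines 1558-1601 expose plain integer knobs (i2c_bus, i2c_addr, i2c_reg, i2c_len) through the debugfs_create_u8() helper, which needs no file_operations at all; the remaining nodes in that function pass dev_entry itself, presumably to debugfs_create_file(), because they need driver code on access. A sketch of the first half with illustrative names.

/*
 * Sketch: expose small integer knobs directly from debugfs.
 * debugfs reads and writes the variables itself, no fops needed.
 */
#include <linux/debugfs.h>
#include <linux/types.h>

struct my_i2c_knobs {
        u8 bus;
        u8 addr;
        u8 reg;
        u8 len;
};

static void add_i2c_nodes(struct my_i2c_knobs *knobs, struct dentry *root)
{
        debugfs_create_u8("i2c_bus", 0644, root, &knobs->bus);
        debugfs_create_u8("i2c_addr", 0644, root, &knobs->addr);
        debugfs_create_u8("i2c_reg", 0644, root, &knobs->reg);
        debugfs_create_u8("i2c_len", 0644, root, &knobs->len);
}
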
1605 static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_entry *dev_entry,
1620 dev_entry,
1626 &dev_entry->addr);
1631 dev_entry,
1637 dev_entry,
1643 dev_entry,
1649 dev_entry,
1655 dev_entry,
1661 dev_entry,
1667 dev_entry,
1673 dev_entry,
1679 dev_entry,
1685 &dev_entry->data_dma_blob_desc);
1690 dev_entry,
1696 &dev_entry->mon_dump_blob_desc);
1706 dev_entry,
1712 dev_entry,
1725 for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
1732 entry->dev_entry = dev_entry;
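
The loop at lines 1725-1732 is table-driven: one record per debugfs file, each created with debugfs_create_file() and given a back-pointer to the device-wide entry so its fops can reach driver state. A sketch of the shape of that loop; the structures and names here are stand-ins, not the driver's.

/*
 * Sketch: create a set of debugfs files from an array of per-file
 * records, each pointing back at the device-wide debugfs state.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>

struct my_dbg_device;           /* device-wide debugfs state (stand-in) */

struct my_info_entry {
        const char *name;
        const struct file_operations *fops;
};

struct my_debugfs_entry {
        const struct my_info_entry *info;
        struct my_dbg_device *dev_entry;
};

static void create_entry_files(struct my_dbg_device *dev,
                               const struct my_info_entry *info_arr,
                               struct my_debugfs_entry *entry_arr,
                               int count, struct dentry *root)
{
        struct my_debugfs_entry *entry;
        int i;

        for (i = 0, entry = entry_arr; i < count; i++, entry++) {
                debugfs_create_file(info_arr[i].name, 0644, root,
                                    entry, info_arr[i].fops);
                entry->info = &info_arr[i];
                entry->dev_entry = dev; /* lets the fops reach device state */
        }
}
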
1738 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1741 dev_entry->hdev = hdev;
1742 dev_entry->entry_arr = kmalloc_array(count, sizeof(struct hl_debugfs_entry), GFP_KERNEL);
1743 if (!dev_entry->entry_arr)
1746 dev_entry->data_dma_blob_desc.size = 0;
1747 dev_entry->data_dma_blob_desc.data = NULL;
1748 dev_entry->mon_dump_blob_desc.size = 0;
1749 dev_entry->mon_dump_blob_desc.data = NULL;
1751 INIT_LIST_HEAD(&dev_entry->file_list);
1752 INIT_LIST_HEAD(&dev_entry->cb_list);
1753 INIT_LIST_HEAD(&dev_entry->cs_list);
1754 INIT_LIST_HEAD(&dev_entry->cs_job_list);
1755 INIT_LIST_HEAD(&dev_entry->userptr_list);
1756 INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
1757 mutex_init(&dev_entry->file_mutex);
1758 init_rwsem(&dev_entry->state_dump_sem);
1759 spin_lock_init(&dev_entry->cb_spinlock);
1760 spin_lock_init(&dev_entry->cs_spinlock);
1761 spin_lock_init(&dev_entry->cs_job_spinlock);
1762 spin_lock_init(&dev_entry->userptr_spinlock);
1763 mutex_init(&dev_entry->ctx_mem_hash_mutex);
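
The init hits at lines 1738-1763 set up every list the show handlers walk and every lock that guards one before any debugfs file is created; the per-file entry array is allocated with kmalloc_array() and the blob descriptors start out empty. A compressed sketch of that init order with the same kind of stand-in type as above, covering just the list/lock part.

/*
 * Sketch: initialize bookkeeping lists and their locks up front,
 * before any debugfs file that might use them exists.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>

struct my_dbg_device {
        struct list_head cb_list;
        struct list_head ctx_list;
        spinlock_t cb_spinlock;
        struct mutex ctx_mutex;
        struct rw_semaphore dump_sem;
};

static void my_debugfs_init(struct my_dbg_device *dev)
{
        INIT_LIST_HEAD(&dev->cb_list);
        INIT_LIST_HEAD(&dev->ctx_list);
        spin_lock_init(&dev->cb_spinlock);
        mutex_init(&dev->ctx_mutex);
        init_rwsem(&dev->dump_sem);
}
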
1787 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1789 dev_entry->root = hdev->drm.accel->debugfs_root;
1791 add_files_to_device(hdev, dev_entry, dev_entry->root);
1794 add_secured_nodes(dev_entry, dev_entry->root);
1799 struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
1801 mutex_lock(&dev_entry->file_mutex);
1802 list_add(&hpriv->debugfs_list, &dev_entry->file_list);
1803 mutex_unlock(&dev_entry->file_mutex);
1808 struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
1810 mutex_lock(&dev_entry->file_mutex);
1812 mutex_unlock(&dev_entry->file_mutex);
1817 struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
1819 spin_lock(&dev_entry->cb_spinlock);
1820 list_add(&cb->debugfs_list, &dev_entry->cb_list);
1821 spin_unlock(&dev_entry->cb_spinlock);
1826 struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
1828 spin_lock(&dev_entry->cb_spinlock);
1830 spin_unlock(&dev_entry->cb_spinlock);
1835 struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
1837 spin_lock(&dev_entry->cs_spinlock);
1838 list_add(&cs->debugfs_list, &dev_entry->cs_list);
1839 spin_unlock(&dev_entry->cs_spinlock);
1844 struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
1846 spin_lock(&dev_entry->cs_spinlock);
1848 spin_unlock(&dev_entry->cs_spinlock);
1853 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1855 spin_lock(&dev_entry->cs_job_spinlock);
1856 list_add(&job->debugfs_list, &dev_entry->cs_job_list);
1857 spin_unlock(&dev_entry->cs_job_spinlock);
1862 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1864 spin_lock(&dev_entry->cs_job_spinlock);
1866 spin_unlock(&dev_entry->cs_job_spinlock);
1871 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1873 spin_lock(&dev_entry->userptr_spinlock);
1874 list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
1875 spin_unlock(&dev_entry->userptr_spinlock);
1881 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1883 spin_lock(&dev_entry->userptr_spinlock);
1885 spin_unlock(&dev_entry->userptr_spinlock);
1890 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1892 mutex_lock(&dev_entry->ctx_mem_hash_mutex);
1893 list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
1894 mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
1899 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1901 mutex_lock(&dev_entry->ctx_mem_hash_mutex);
1903 mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
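
Lines 1799-1903 are the other half of the pattern from the show handlers: every object type gets a tiny add/remove pair that the creation and teardown paths call, so the debugfs readers always see a consistent list. One representative pair, using the same kind of stand-in types as the earlier sketch.

/*
 * Sketch: paired add/remove helpers taking the same spinlock the
 * debugfs show handler takes when it walks the list.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_obj {
        struct list_head debugfs_list;
};

struct my_dbg_entry {
        struct list_head obj_list;
        spinlock_t obj_spinlock;
};

static void my_debugfs_add_obj(struct my_dbg_entry *dev_entry,
                               struct my_obj *obj)
{
        spin_lock(&dev_entry->obj_spinlock);
        list_add(&obj->debugfs_list, &dev_entry->obj_list);
        spin_unlock(&dev_entry->obj_spinlock);
}

static void my_debugfs_remove_obj(struct my_dbg_entry *dev_entry,
                                  struct my_obj *obj)
{
        spin_lock(&dev_entry->obj_spinlock);
        list_del(&obj->debugfs_list);
        spin_unlock(&dev_entry->obj_spinlock);
}
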
1916 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1918 down_write(&dev_entry->state_dump_sem);
1920 dev_entry->state_dump_head = (dev_entry->state_dump_head + 1) %
1921 ARRAY_SIZE(dev_entry->state_dump);
1922 vfree(dev_entry->state_dump[dev_entry->state_dump_head]);
1923 dev_entry->state_dump[dev_entry->state_dump_head] = data;
1925 up_write(&dev_entry->state_dump_sem);
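
The final hits (lines 1916-1925) implement a small ring buffer of state dumps: the writer takes the rwsem exclusively, advances the head modulo ARRAY_SIZE(), vfree()s whatever the slot held before, and parks the new dump there; the debugfs reader presumably takes the same rwsem for reading. A sketch under those assumptions, with illustrative names.

/*
 * Sketch: push a vmalloc'ed dump into a fixed-size ring, freeing the
 * slot it overwrites. Takes ownership of @data.
 */
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#define MY_DUMP_SLOTS 10

struct my_dump_ring {
        struct rw_semaphore sem;
        u32 head;
        char *dump[MY_DUMP_SLOTS];
};

static void my_dump_push(struct my_dump_ring *ring, char *data)
{
        down_write(&ring->sem);

        ring->head = (ring->head + 1) % ARRAY_SIZE(ring->dump);
        vfree(ring->dump[ring->head]);  /* vfree(NULL) is a no-op */
        ring->dump[ring->head] = data;

        up_write(&ring->sem);
}
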