Home
last modified time | relevance | path

Searched refs:qdev (Results 1 – 25 of 25) sorted by relevance

/linux/drivers/net/ethernet/qlogic/
H A Dqla3xxx.c103 static int ql_sem_spinlock(struct ql3_adapter *qdev, in ql_sem_spinlock() argument
107 qdev->mem_map_registers; in ql_sem_spinlock()
122 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) in ql_sem_unlock() argument
125 qdev->mem_map_registers; in ql_sem_unlock()
130 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) in ql_sem_lock() argument
133 qdev->mem_map_registers; in ql_sem_lock()
144 static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) in ql_wait_for_drvr_lock() argument
149 if (ql_sem_lock(qdev, in ql_wait_for_drvr_lock()
151 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) in ql_wait_for_drvr_lock()
153 netdev_printk(KERN_DEBUG, qdev->ndev, in ql_wait_for_drvr_lock()
[all …]
/linux/drivers/dma/amd/qdma/
H A Dqdma.c39 static inline u32 qdma_get_intr_ring_idx(struct qdma_device *qdev) in qdma_get_intr_ring_idx() argument
43 idx = qdev->qintr_rings[qdev->qintr_ring_idx++].ridx; in qdma_get_intr_ring_idx()
44 qdev->qintr_ring_idx %= qdev->qintr_ring_num; in qdma_get_intr_ring_idx()
49 static u64 qdma_get_field(const struct qdma_device *qdev, const u32 *data, in qdma_get_field() argument
52 const struct qdma_reg_field *f = &qdev->rfields[field]; in qdma_get_field()
86 static void qdma_set_field(const struct qdma_device *qdev, u32 *data, in qdma_set_field() argument
89 const struct qdma_reg_field *f = &qdev->rfields[field]; in qdma_set_field()
103 static inline int qdma_reg_write(const struct qdma_device *qdev, in qdma_reg_write() argument
106 const struct qdma_reg *r = &qdev->roffs[reg]; in qdma_reg_write()
110 ret = regmap_bulk_write(qdev->regmap, r->off, data, r->count); in qdma_reg_write()
[all …]
H A Dqdma.h45 #define qdma_err(qdev, fmt, args...) \ argument
46 dev_err(&(qdev)->pdev->dev, fmt, ##args)
48 #define qdma_dbg(qdev, fmt, args...) \ argument
49 dev_dbg(&(qdev)->pdev->dev, fmt, ##args)
51 #define qdma_info(qdev, fmt, args...) \ argument
52 dev_info(&(qdev)->pdev->dev, fmt, ##args)
205 struct qdma_device *qdev; member
223 struct qdma_device *qdev; member
/linux/drivers/accel/qaic/
H A Dqaic_drv.c127 struct qaic_device *qdev = to_qaic_device(dev); in qaicm_pci_release() local
129 pci_set_drvdata(qdev->pdev, NULL); in qaicm_pci_release()
144 struct qaic_device *qdev = qddev->qdev; in qaic_open() local
149 rcu_id = srcu_read_lock(&qdev->dev_lock); in qaic_open()
150 if (qdev->dev_state != QAIC_ONLINE) { in qaic_open()
180 srcu_read_unlock(&qdev->dev_lock, rcu_id); in qaic_open()
189 srcu_read_unlock(&qdev->dev_lock, rcu_id); in qaic_open()
197 struct qaic_device *qdev; in qaic_postclose() local
205 qdev = qddev->qdev; in qaic_postclose()
206 qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); in qaic_postclose()
[all …]
H A Dqaic_debugfs.c31 struct qaic_device *qdev; member
48 struct qaic_device *qdev; in bootlog_show() local
52 qdev = s->private; in bootlog_show()
53 mutex_lock(&qdev->bootlog_mutex); in bootlog_show()
54 list_for_each_entry(page, &qdev->bootlog, node) { in bootlog_show()
62 mutex_unlock(&qdev->bootlog_mutex); in bootlog_show()
100 struct qaic_device *qdev = qddev->qdev; in qaic_debugfs_init() local
108 debugfs_create_file("bootlog", 0400, debugfs_root, qdev, &bootlog_fops); in qaic_debugfs_init()
113 for (i = 0; i < qdev->num_dbc && i < 256; ++i) { in qaic_debugfs_init()
116 debugfs_create_file("fifo_size", 0400, debugfs_dir, &qdev->dbc[i], &fifo_size_fops); in qaic_debugfs_init()
[all …]
H A Dqaic_ssr.c112 struct qaic_device *qdev; member
160 struct qaic_device *qdev; member
209 void qaic_clean_up_ssr(struct qaic_device *qdev) in qaic_clean_up_ssr() argument
211 struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf; in qaic_clean_up_ssr()
216 qaic_dbc_exit_ssr(qdev); in qaic_clean_up_ssr()
258 static int send_xfer_done(struct qaic_device *qdev, void *resp, u32 dbc_id) in send_xfer_done() argument
269 ret = mhi_queue_buf(qdev->ssr_ch, DMA_FROM_DEVICE, resp, SSR_RESP_MSG_SZ, MHI_EOT); in send_xfer_done()
277 ret = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, xfer_done, sizeof(*xfer_done), MHI_EOT); in send_xfer_done()
289 static int mem_read_req(struct qaic_device *qdev, u64 dest_addr, u64 dest_len) in mem_read_req() argument
291 struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf; in mem_read_req()
[all …]
H A Dqaic_control.c237 struct qaic_device *qdev; member
298 static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources, in save_dbc_buf() argument
304 wait_event_interruptible(qdev->dbc[dbc_id].dbc_release, !qdev->dbc[dbc_id].in_use); in save_dbc_buf()
305 qdev->dbc[dbc_id].req_q_base = resources->buf; in save_dbc_buf()
306 qdev->dbc[dbc_id].rsp_q_base = resources->rsp_q_base; in save_dbc_buf()
307 qdev->dbc[dbc_id].dma_addr = resources->dma_addr; in save_dbc_buf()
308 qdev->dbc[dbc_id].total_size = resources->total_size; in save_dbc_buf()
309 qdev->dbc[dbc_id].nelem = resources->nelem; in save_dbc_buf()
310 enable_dbc(qdev, dbc_id, usr); in save_dbc_buf()
311 qdev->dbc[dbc_id].in_use = true; in save_dbc_buf()
[all …]
H A Dqaic_data.c173 static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out, in clone_range_of_sgt_for_slice() argument
256 static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice, in encode_reqs() argument
391 static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo, in qaic_map_one_slice() argument
398 ret = clone_range_of_sgt_for_slice(qdev, &sgt, bo->sgt, slice_ent->size, slice_ent->offset); in qaic_map_one_slice()
422 ret = encode_reqs(qdev, slice, slice_ent); in qaic_map_one_slice()
444 static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 size) in create_sgt() argument
558 static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent, in qaic_validate_req() argument
714 struct qaic_device *qdev; in qaic_create_bo_ioctl() local
734 qdev = usr->qddev->qdev; in qaic_create_bo_ioctl()
735 qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); in qaic_create_bo_ioctl()
[all …]
H A Dqaic_ras.c281 static void decode_ras_msg(struct qaic_device *qdev, struct ras_data *msg) in decode_ras_msg() argument
294 pci_warn(qdev->pdev, "Dropping RAS message with invalid magic %x\n", msg->magic); in decode_ras_msg()
299 pci_warn(qdev->pdev, "Dropping RAS message with invalid version %d\n", msg->ver); in decode_ras_msg()
304 pci_warn(qdev->pdev, "Dropping non-PUSH RAS message\n"); in decode_ras_msg()
309 pci_warn(qdev->pdev, "Dropping RAS message with invalid len %d\n", msg->len); in decode_ras_msg()
314 pci_warn(qdev->pdev, "Dropping RAS message with err type %d\n", msg->err_type); in decode_ras_msg()
325 …dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold f… in decode_ras_msg()
341 …dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold f… in decode_ras_msg()
384 …dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold f… in decode_ras_msg()
405 …dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold f… in decode_ras_msg()
[all …]
H A Dqaic_timesync.c78 struct qaic_device *qdev; member
94 struct qaic_device *qdev; member
183 struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev)); in qaic_timesync_probe() local
196 mqtsdev->qdev = qdev; in qaic_timesync_probe()
197 mqtsdev->dev = &qdev->pdev->dev; in qaic_timesync_probe()
211 mqtsdev->qtimer_addr = qdev->bar_mhi + QTIMER_REG_OFFSET; in qaic_timesync_probe()
216 qdev->mqts_ch = mhi_dev; in qaic_timesync_probe()
232 mqtsdev->qdev->mqts_ch = NULL; in qaic_timesync_remove()
262 struct qaic_device *qdev = resp->qdev; in qaic_boot_timesync_worker() local
267 mhi_dev = qdev->qts_ch; in qaic_boot_timesync_worker()
[all …]
H A Dqaic.h34 #define to_qaic_device(dev) (to_qaic_drm_device((dev))->qdev)
84 struct qaic_device *qdev; member
215 struct qaic_device *qdev; member
320 int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor);
326 int qaic_control_open(struct qaic_device *qdev);
327 void qaic_control_close(struct qaic_device *qdev);
328 void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr);
332 int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
333 void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
334 void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
[all …]
/linux/drivers/gpu/drm/qxl/
H A Dqxl_cmd.c36 static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
179 qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, in qxl_push_command_ring_release() argument
185 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset); in qxl_push_command_ring_release()
187 return qxl_ring_push(qdev->command_ring, &cmd, interruptible); in qxl_push_command_ring_release()
191 qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, in qxl_push_cursor_ring_release() argument
197 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset); in qxl_push_cursor_ring_release()
199 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); in qxl_push_cursor_ring_release()
202 bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush) in qxl_queue_garbage_collect() argument
204 if (!qxl_check_idle(qdev->release_ring)) { in qxl_queue_garbage_collect()
205 schedule_work(&qdev->gc_work); in qxl_queue_garbage_collect()
[all …]
H A Dqxl_display.c52 static int qxl_alloc_client_monitors_config(struct qxl_device *qdev, in qxl_alloc_client_monitors_config() argument
55 if (qdev->client_monitors_config && in qxl_alloc_client_monitors_config()
56 count > qdev->client_monitors_config->count) { in qxl_alloc_client_monitors_config()
57 kfree(qdev->client_monitors_config); in qxl_alloc_client_monitors_config()
58 qdev->client_monitors_config = NULL; in qxl_alloc_client_monitors_config()
60 if (!qdev->client_monitors_config) { in qxl_alloc_client_monitors_config()
61 qdev->client_monitors_config = kzalloc_flex(*qdev->client_monitors_config, in qxl_alloc_client_monitors_config()
63 if (!qdev->client_monitors_config) in qxl_alloc_client_monitors_config()
66 qdev->client_monitors_config->count = count; in qxl_alloc_client_monitors_config()
77 static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev) in qxl_display_copy_rom_client_monitors_config() argument
[all …]
H A Dqxl_drv.h258 int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev);
259 void qxl_device_fini(struct qxl_device *qdev);
261 int qxl_modeset_init(struct qxl_device *qdev);
262 void qxl_modeset_fini(struct qxl_device *qdev);
264 int qxl_bo_init(struct qxl_device *qdev);
265 void qxl_bo_fini(struct qxl_device *qdev);
267 void qxl_reinit_memslots(struct qxl_device *qdev);
268 int qxl_surf_evict(struct qxl_device *qdev);
269 int qxl_vram_evict(struct qxl_device *qdev);
280 qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo, in qxl_bo_physical_address() argument
[all …]
H A Dqxl_release.c62 struct qxl_device *qdev; in qxl_fence_wait() local
65 qdev = container_of(fence->lock, struct qxl_device, release_lock); in qxl_fence_wait()
67 if (!wait_event_timeout(qdev->release_event, in qxl_fence_wait()
69 (qxl_io_notify_oom(qdev), 0)), in qxl_fence_wait()
86 qxl_release_alloc(struct qxl_device *qdev, int type, in qxl_release_alloc() argument
105 spin_lock(&qdev->release_idr_lock); in qxl_release_alloc()
106 handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT); in qxl_release_alloc()
107 release->base.seqno = ++qdev->release_seqno; in qxl_release_alloc()
108 spin_unlock(&qdev->release_idr_lock); in qxl_release_alloc()
137 qxl_release_free(struct qxl_device *qdev, in qxl_release_free() argument
[all …]
H A Dqxl_ttm.c44 struct qxl_device *qdev; in qxl_get_qdev() local
47 qdev = container_of(mman, struct qxl_device, mman); in qxl_get_qdev()
48 return qdev; in qxl_get_qdev()
75 struct qxl_device *qdev = qxl_get_qdev(bdev); in qxl_ttm_io_mem_reserve() local
83 mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->vram_base; in qxl_ttm_io_mem_reserve()
89 qdev->surfaceram_base; in qxl_ttm_io_mem_reserve()
126 struct qxl_device *qdev; in qxl_bo_move_notify() local
131 qdev = to_qxl(qbo->tbo.base.dev); in qxl_bo_move_notify()
134 qxl_surface_evict(qdev, qbo, new_mem ? true : false); in qxl_bo_move_notify()
184 static int qxl_ttm_init_mem_type(struct qxl_device *qdev, in qxl_ttm_init_mem_type() argument
[all …]
H A Dqxl_object.c35 struct qxl_device *qdev; in qxl_ttm_bo_destroy() local
38 qdev = to_qxl(bo->tbo.base.dev); in qxl_ttm_bo_destroy()
40 qxl_surface_evict(qdev, bo, false); in qxl_ttm_bo_destroy()
42 mutex_lock(&qdev->gem.mutex); in qxl_ttm_bo_destroy()
44 mutex_unlock(&qdev->gem.mutex); in qxl_ttm_bo_destroy()
104 int qxl_bo_create(struct qxl_device *qdev, unsigned long size, in qxl_bo_create() argument
123 r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size); in qxl_bo_create()
139 r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, type, in qxl_bo_create()
144 dev_err(qdev->ddev.dev, in qxl_bo_create()
206 void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, in qxl_bo_kmap_atomic_page() argument
[all …]
H A Dqxl_draw.c31 static int alloc_clips(struct qxl_device *qdev, in alloc_clips() argument
38 return qxl_alloc_bo_reserved(qdev, release, size, clips_bo); in alloc_clips()
44 static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, in drawable_set_clipping() argument
65 alloc_drawable(struct qxl_device *qdev, struct qxl_release **release) in alloc_drawable() argument
67 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable), in alloc_drawable()
72 free_drawable(struct qxl_device *qdev, struct qxl_release *release) in free_drawable() argument
74 qxl_release_free(qdev, release); in free_drawable()
79 make_drawable(struct qxl_device *qdev, int surface, uint8_t type, in make_drawable() argument
86 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); in make_drawable()
114 drawable->mm_time = qdev->rom->mm_clock; in make_drawable()
[all …]
H A Dqxl_ioctl.c40 struct qxl_device *qdev = to_qxl(dev); in qxl_alloc_ioctl() local
50 ret = qxl_gem_object_create_with_handle(qdev, file_priv, in qxl_alloc_ioctl()
66 struct qxl_device *qdev = to_qxl(dev); in qxl_map_ioctl() local
69 return drm_gem_ttm_dumb_map_offset(file_priv, &qdev->ddev, qxl_map->handle, in qxl_map_ioctl()
87 apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) in apply_reloc() argument
91 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); in apply_reloc()
92 *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, in apply_reloc()
95 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); in apply_reloc()
99 apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) in apply_surf_reloc() argument
107 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); in apply_surf_reloc()
[all …]
H A Dqxl_image.c35 qxl_allocate_chunk(struct qxl_device *qdev, in qxl_allocate_chunk() argument
47 ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo); in qxl_allocate_chunk()
58 qxl_image_alloc_objects(struct qxl_device *qdev, in qxl_image_alloc_objects() argument
72 ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo); in qxl_image_alloc_objects()
78 ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height); in qxl_image_alloc_objects()
88 void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage) in qxl_image_free_objects() argument
102 qxl_image_init_helper(struct qxl_device *qdev, in qxl_image_init_helper() argument
129 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); in qxl_image_init_helper()
134 qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr); in qxl_image_init_helper()
148 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT); in qxl_image_init_helper()
[all …]
H A Dqxl_object.h56 extern int qxl_bo_create(struct qxl_device *qdev,
66 void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
67 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
H A Dqxl_dumb.c35 struct qxl_device *qdev = to_qxl(dev); in qxl_mode_dumb_create() local
64 r = qxl_gem_object_create_with_handle(qdev, file_priv, in qxl_mode_dumb_create()
/linux/net/qrtr/
H A Dsmd.c23 struct qrtr_smd_dev *qdev = dev_get_drvdata(&rpdev->dev); in qcom_smd_qrtr_callback() local
26 if (!qdev) in qcom_smd_qrtr_callback()
29 rc = qrtr_endpoint_post(&qdev->ep, data, len); in qcom_smd_qrtr_callback()
31 dev_err(qdev->dev, "invalid ipcrouter packet\n"); in qcom_smd_qrtr_callback()
42 struct qrtr_smd_dev *qdev = container_of(ep, struct qrtr_smd_dev, ep); in qcom_smd_qrtr_send() local
49 rc = rpmsg_send(qdev->channel, skb->data, skb->len); in qcom_smd_qrtr_send()
61 struct qrtr_smd_dev *qdev; in qcom_smd_qrtr_probe() local
64 qdev = devm_kzalloc(&rpdev->dev, sizeof(*qdev), GFP_KERNEL); in qcom_smd_qrtr_probe()
65 if (!qdev) in qcom_smd_qrtr_probe()
68 qdev->channel = rpdev->ept; in qcom_smd_qrtr_probe()
[all …]
H A Dmhi.c24 struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev); in qcom_mhi_qrtr_dl_callback() local
27 if (!qdev || (mhi_res->transaction_status && mhi_res->transaction_status != -ENOTCONN)) in qcom_mhi_qrtr_dl_callback()
36 rc = qrtr_endpoint_post(&qdev->ep, mhi_res->buf_addr,
39 dev_err(qdev->dev, "invalid ipcrouter packet\n"); in qcom_mhi_qrtr_ul_callback()
62 struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep); in qcom_mhi_qrtr_send()
72 rc = mhi_queue_skb(qdev->mhi_dev, DMA_TO_DEVICE, skb, skb->len, in qcom_mhi_qrtr_send()
113 struct qrtr_mhi_dev *qdev; in qcom_mhi_qrtr_remove()
116 qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL);
117 if (!qdev)
50 struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep); qcom_mhi_qrtr_send() local
78 struct qrtr_mhi_dev *qdev; qcom_mhi_qrtr_probe() local
108 struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev); qcom_mhi_qrtr_remove() local
[all …]
/linux/drivers/md/
H A Draid5.c4911 struct r5dev *pdev, *qdev; in handle_stripe() local
5055 qdev = &sh->dev[sh->qd_idx]; in handle_stripe()
5065 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) in handle_stripe()
5066 && !test_bit(R5_LOCKED, &qdev->flags) in handle_stripe()
5067 && (test_bit(R5_UPTODATE, &qdev->flags) || in handle_stripe()
5068 test_bit(R5_Discard, &qdev->flags)))))) in handle_stripe()