Lines matching full:cb
31 struct mei_cl_cb *cb, *next; in mei_irq_compl_handler() local
34 list_for_each_entry_safe(cb, next, cmpl_list, list) { in mei_irq_compl_handler()
35 cl = cb->cl; in mei_irq_compl_handler()
36 list_del_init(&cb->list); in mei_irq_compl_handler()
39 mei_cl_complete(cl, cb); in mei_irq_compl_handler()
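The mei_irq_compl_handler() hits above show the usual kernel pattern: walk the completion list with list_for_each_entry_safe(), unlink each callback block, then complete it. Below is a minimal userspace sketch of that iterate-and-unlink pattern; the list helpers are hand-rolled stand-ins for <linux/list.h>, and struct cb / complete_cb() are hypothetical placeholders for struct mei_cl_cb and mei_cl_complete(), not the driver's real definitions.

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal doubly linked list mirroring the <linux/list.h> idiom. */
    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static void list_del_init(struct list_head *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
        INIT_LIST_HEAD(n);
    }

    /* Safe variant: 'n' caches the next node so 'pos' may be unlinked
     * (or freed) inside the loop body. */
    #define list_for_each_entry_safe(pos, n, head, member)                    \
        for (pos = list_entry((head)->next, __typeof__(*pos), member),        \
             n = list_entry(pos->member.next, __typeof__(*pos), member);      \
             &pos->member != (head);                                          \
             pos = n, n = list_entry(n->member.next, __typeof__(*n), member))

    /* Hypothetical stand-in for struct mei_cl_cb. */
    struct cb { int id; struct list_head list; };

    static void complete_cb(struct cb *cb)
    {
        printf("completed cb %d\n", cb->id);
        free(cb);
    }

    int main(void)
    {
        struct list_head cmpl_list;
        struct cb *cb, *next;
        int i;

        INIT_LIST_HEAD(&cmpl_list);
        for (i = 0; i < 3; i++) {
            cb = malloc(sizeof(*cb));
            if (!cb)
                return 1;
            cb->id = i;
            list_add_tail(&cb->list, &cmpl_list);
        }

        /* Same shape as mei_irq_compl_handler(): unlink, then complete. */
        list_for_each_entry_safe(cb, next, &cmpl_list, list) {
            list_del_init(&cb->list);
            complete_cb(cb);
        }
        return 0;
    }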
99 struct mei_cl_cb *cb; in mei_cl_irq_read_msg() local
115 cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); in mei_cl_irq_read_msg()
116 if (!cb) { in mei_cl_irq_read_msg()
118 cl_err(dev, cl, "pending read cb not found\n"); in mei_cl_irq_read_msg()
121 cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp); in mei_cl_irq_read_msg()
122 if (!cb) in mei_cl_irq_read_msg()
124 list_add_tail(&cb->list, &cl->rd_pending); in mei_cl_irq_read_msg()
136 cb->ext_hdr = (struct mei_ext_hdr *)kzalloc(sizeof(*gsc_f2h), GFP_KERNEL); in mei_cl_irq_read_msg()
137 if (!cb->ext_hdr) { in mei_cl_irq_read_msg()
138 cb->status = -ENOMEM; in mei_cl_irq_read_msg()
146 cb->status = -EPROTO; in mei_cl_irq_read_msg()
155 cb->status = -EPROTO; in mei_cl_irq_read_msg()
162 if (cb->vtag && cb->vtag != vtag_hdr->vtag) { in mei_cl_irq_read_msg()
164 cb->vtag, vtag_hdr->vtag); in mei_cl_irq_read_msg()
165 cb->status = -EPROTO; in mei_cl_irq_read_msg()
168 cb->vtag = vtag_hdr->vtag; in mei_cl_irq_read_msg()
176 cb->status = -EPROTO; in mei_cl_irq_read_msg()
181 cl_err(dev, cl, "no data allowed in cb with gsc\n"); in mei_cl_irq_read_msg()
182 cb->status = -EPROTO; in mei_cl_irq_read_msg()
187 cb->status = -EPROTO; in mei_cl_irq_read_msg()
190 memcpy(cb->ext_hdr, gsc_f2h, ext_hdr_len); in mei_cl_irq_read_msg()
195 cb->status = -ENODEV; in mei_cl_irq_read_msg()
202 buf_sz = length + cb->buf_idx; in mei_cl_irq_read_msg()
204 if (buf_sz < cb->buf_idx) { in mei_cl_irq_read_msg()
206 length, cb->buf_idx); in mei_cl_irq_read_msg()
207 cb->status = -EMSGSIZE; in mei_cl_irq_read_msg()
211 if (cb->buf.size < buf_sz) { in mei_cl_irq_read_msg()
213 cb->buf.size, length, cb->buf_idx); in mei_cl_irq_read_msg()
214 cb->status = -EMSGSIZE; in mei_cl_irq_read_msg()
219 mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length); in mei_cl_irq_read_msg()
221 mei_read_slots(dev, cb->buf.data + cb->buf_idx, 0); in mei_cl_irq_read_msg()
223 mei_read_slots(dev, cb->buf.data + cb->buf_idx, length); in mei_cl_irq_read_msg()
226 cb->buf_idx += length; in mei_cl_irq_read_msg()
229 cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx); in mei_cl_irq_read_msg()
230 list_move_tail(&cb->list, cmpl_list); in mei_cl_irq_read_msg()
238 if (cb) in mei_cl_irq_read_msg()
239 list_move_tail(&cb->list, cmpl_list); in mei_cl_irq_read_msg()
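The mei_cl_irq_read_msg() hits above accumulate each message fragment at cb->buf_idx and set -EMSGSIZE when the new size would either wrap the index or overrun the preallocated buffer. A compact sketch of just that bounds check, assuming an illustrative rx_buf structure rather than the driver's real mei_cl_cb layout:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-in for the receive buffer tracked in a callback block. */
    struct rx_buf {
        unsigned char data[64];
        size_t size;     /* capacity of data[], like cb->buf.size        */
        size_t buf_idx;  /* bytes accumulated so far, like cb->buf_idx   */
        int status;      /* 0 or negative errno, like cb->status         */
    };

    /* Append 'length' bytes, mirroring the checks in mei_cl_irq_read_msg(). */
    static int rx_append(struct rx_buf *buf, const void *src, size_t length)
    {
        size_t buf_sz = length + buf->buf_idx;

        if (buf_sz < buf->buf_idx) {          /* size_t wraparound */
            buf->status = -EMSGSIZE;
            return buf->status;
        }
        if (buf->size < buf_sz) {             /* would overrun the buffer */
            buf->status = -EMSGSIZE;
            return buf->status;
        }
        memcpy(buf->data + buf->buf_idx, src, length);
        buf->buf_idx += length;
        return 0;
    }

    int main(void)
    {
        struct rx_buf buf = { .size = sizeof(buf.data) };
        const char chunk[32] = "payload";

        printf("first:  %d\n", rx_append(&buf, chunk, sizeof(chunk)));  /* 0          */
        printf("second: %d\n", rx_append(&buf, chunk, sizeof(chunk)));  /* 0          */
        printf("third:  %d\n", rx_append(&buf, chunk, sizeof(chunk)));  /* -EMSGSIZE  */
        return 0;
    }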
248 * @cb: callback block.
253 static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, in mei_cl_irq_disconnect_rsp() argument
270 list_move_tail(&cb->list, cmpl_list); in mei_cl_irq_disconnect_rsp()
280 * @cb: callback block.
285 static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, in mei_cl_irq_read() argument
307 cb->buf_idx = 0; in mei_cl_irq_read()
308 list_move_tail(&cb->list, cmpl_list); in mei_cl_irq_read()
314 list_move_tail(&cb->list, &cl->rd_pending); in mei_cl_irq_read()
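Both outcomes in mei_cl_irq_read() end in list_move_tail(): on success the callback block is retargeted to cl->rd_pending, on failure to the completion list. The sketch below illustrates what list_move_tail() does (unlink from whatever list the node is on, then append to the new head); the helpers mimic <linux/list.h> but are a userspace illustration, not the kernel header.

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static void __list_del_entry(struct list_head *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
    }

    /* Unlink from the current list, then append to 'head'. */
    static void list_move_tail(struct list_head *n, struct list_head *head)
    {
        __list_del_entry(n);
        list_add_tail(n, head);
    }

    int main(void)
    {
        struct list_head ctrl_wr_list, rd_pending, cmpl_list, cb;

        INIT_LIST_HEAD(&ctrl_wr_list);
        INIT_LIST_HEAD(&rd_pending);
        INIT_LIST_HEAD(&cmpl_list);
        INIT_LIST_HEAD(&cb);

        list_add_tail(&cb, &ctrl_wr_list);
        /* Success path in mei_cl_irq_read(): hand the cb to rd_pending. */
        list_move_tail(&cb, &rd_pending);
        /* Error path: the same call retargets it to the completion list. */
        list_move_tail(&cb, &cmpl_list);

        printf("cb on cmpl_list: %s\n", cmpl_list.next == &cb ? "yes" : "no");
        return 0;
    }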
509 struct mei_cl_cb *cb, *next; in mei_irq_write_handler() local
524 /* complete all waiting for write CB */ in mei_irq_write_handler()
525 dev_dbg(&dev->dev, "complete all waiting for write cb.\n"); in mei_irq_write_handler()
527 list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) { in mei_irq_write_handler()
528 cl = cb->cl; in mei_irq_write_handler()
533 list_move_tail(&cb->list, cmpl_list); in mei_irq_write_handler()
536 /* complete control write list CB */ in mei_irq_write_handler()
537 dev_dbg(&dev->dev, "complete control write list cb.\n"); in mei_irq_write_handler()
538 list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) { in mei_irq_write_handler()
539 cl = cb->cl; in mei_irq_write_handler()
540 switch (cb->fop_type) { in mei_irq_write_handler()
543 ret = mei_cl_irq_disconnect(cl, cb, cmpl_list); in mei_irq_write_handler()
550 ret = mei_cl_irq_read(cl, cb, cmpl_list); in mei_irq_write_handler()
557 ret = mei_cl_irq_connect(cl, cb, cmpl_list); in mei_irq_write_handler()
564 ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list); in mei_irq_write_handler()
571 ret = mei_cl_irq_notify(cl, cb, cmpl_list); in mei_irq_write_handler()
576 ret = mei_cl_irq_dma_map(cl, cb, cmpl_list); in mei_irq_write_handler()
581 ret = mei_cl_irq_dma_unmap(cl, cb, cmpl_list); in mei_irq_write_handler()
590 /* complete write list CB */ in mei_irq_write_handler()
591 dev_dbg(&dev->dev, "complete write list cb.\n"); in mei_irq_write_handler()
592 list_for_each_entry_safe(cb, next, &dev->write_list, list) { in mei_irq_write_handler()
593 cl = cb->cl; in mei_irq_write_handler()
594 ret = mei_cl_irq_write(cl, cb, cmpl_list); in mei_irq_write_handler()
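The mei_irq_write_handler() hits above dispatch each control-list callback block on cb->fop_type and stop processing as soon as a handler fails. A reduced sketch of that dispatch shape; the enum values and handler names below are illustrative placeholders, not the driver's actual mei_cb_file_ops values or mei_cl_irq_* functions.

    #include <stdio.h>

    /* Illustrative subset of the operation types dispatched in
     * mei_irq_write_handler(). */
    enum fop_type {
        FOP_DISCONNECT,
        FOP_READ,
        FOP_CONNECT,
        FOP_DISCONNECT_RSP,
        FOP_NOTIFY,
        FOP_DMA_MAP,
        FOP_DMA_UNMAP,
    };

    struct cb { enum fop_type fop_type; };   /* hypothetical stand-in for mei_cl_cb */

    /* Each handler returns 0, or a negative errno to abort processing,
     * mirroring the per-case error handling in the real function. */
    static int irq_disconnect(struct cb *cb)     { (void)cb; puts("disconnect");     return 0; }
    static int irq_read(struct cb *cb)           { (void)cb; puts("read");           return 0; }
    static int irq_connect(struct cb *cb)        { (void)cb; puts("connect");        return 0; }
    static int irq_disconnect_rsp(struct cb *cb) { (void)cb; puts("disconnect_rsp"); return 0; }
    static int irq_notify(struct cb *cb)         { (void)cb; puts("notify");         return 0; }
    static int irq_dma_map(struct cb *cb)        { (void)cb; puts("dma_map");        return 0; }
    static int irq_dma_unmap(struct cb *cb)      { (void)cb; puts("dma_unmap");      return 0; }

    static int dispatch(struct cb *cb)
    {
        switch (cb->fop_type) {
        case FOP_DISCONNECT:     return irq_disconnect(cb);
        case FOP_READ:           return irq_read(cb);
        case FOP_CONNECT:        return irq_connect(cb);
        case FOP_DISCONNECT_RSP: return irq_disconnect_rsp(cb);
        case FOP_NOTIFY:         return irq_notify(cb);
        case FOP_DMA_MAP:        return irq_dma_map(cb);
        case FOP_DMA_UNMAP:      return irq_dma_unmap(cb);
        default:                 return 0;   /* unknown ops are skipped */
        }
    }

    int main(void)
    {
        struct cb cb = { .fop_type = FOP_READ };
        return dispatch(&cb) ? 1 : 0;
    }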