Lines matching refs: c67x00 — from the Cypress c67x00 USB host-controller scheduler (drivers/usb/c67x00/c67x00-sched.c)
136 static void dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg) in dbg_td() argument
138 struct device *dev = c67x00_hcd_dev(c67x00); in dbg_td()
159 static inline u16 c67x00_get_current_frame_number(struct c67x00_hcd *c67x00) in c67x00_get_current_frame_number() argument
161 return c67x00_ll_husb_get_frame(c67x00->sie) & HOST_FRAME_MASK; in c67x00_get_current_frame_number()
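
Note: the SIE's frame counter is read and masked with HOST_FRAME_MASK, so every frame comparison in this file (frame_after on line 242, frame_after_eq on lines 734 and 746) is modular. A minimal userspace model of that wrap-around arithmetic, assuming an 11-bit counter (HOST_FRAME_MASK == 0x07FF, the USB full-speed frame-number width); the helpers below are illustrative, not the driver's macros:

    #include <assert.h>
    #include <stdint.h>

    #define HOST_FRAME_MASK 0x07FF  /* assumed: 11-bit USB frame number */

    /* Add modulo the counter width. */
    static uint16_t frame_add(uint16_t a, uint16_t b)
    {
        return (a + b) & HOST_FRAME_MASK;
    }

    /* True if 'a' is later than 'b' on the circular counter. */
    static int frame_after(uint16_t a, uint16_t b)
    {
        uint16_t d = (uint16_t)(a - b) & HOST_FRAME_MASK;

        return d != 0 && d < HOST_FRAME_MASK / 2;
    }

    int main(void)
    {
        assert(frame_add(0x07FF, 1) == 0);   /* wraps to zero */
        assert(frame_after(2, 0x07FE));      /* later across the wrap */
        assert(!frame_after(0x07FE, 2));
        return 0;
    }
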
198 static void c67x00_release_urb(struct c67x00_hcd *c67x00, struct urb *urb) in c67x00_release_urb() argument
205 c67x00->urb_count--; in c67x00_release_urb()
208 c67x00->urb_iso_count--; in c67x00_release_urb()
209 if (c67x00->urb_iso_count == 0) in c67x00_release_urb()
210 c67x00->max_frame_bw = MAX_FRAME_BW_STD; in c67x00_release_urb()
218 list_for_each_entry(td, &c67x00->td_list, td_list) in c67x00_release_urb()
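
Note: c67x00_release_urb() undoes the enqueue-side accounting: urb_count drops, and when the last isochronous URB disappears (urb_iso_count reaching zero, line 209) the per-frame budget falls back from the isochronous maximum to MAX_FRAME_BW_STD. A userspace sketch of that counter-guarded budget switch, covering both directions (the enqueue side is on lines 397-399); the numeric values are placeholders, not the driver's constants:

    #include <assert.h>

    /* Placeholder budgets; the driver's MAX_FRAME_BW_* values differ. */
    #define MAX_FRAME_BW_STD 1024
    #define MAX_FRAME_BW_ISO 2048

    struct hcd_model {
        int urb_iso_count;
        int max_frame_bw;
    };

    static void iso_urb_added(struct hcd_model *h)
    {
        if (h->urb_iso_count == 0)   /* first iso URB raises the cap */
            h->max_frame_bw = MAX_FRAME_BW_ISO;
        h->urb_iso_count++;
    }

    static void iso_urb_released(struct hcd_model *h)
    {
        h->urb_iso_count--;
        if (h->urb_iso_count == 0)   /* last iso URB restores it */
            h->max_frame_bw = MAX_FRAME_BW_STD;
    }

    int main(void)
    {
        struct hcd_model h = { 0, MAX_FRAME_BW_STD };

        iso_urb_added(&h);
        assert(h.max_frame_bw == MAX_FRAME_BW_ISO);
        iso_urb_released(&h);
        assert(h.max_frame_bw == MAX_FRAME_BW_STD);
        return 0;
    }
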
231 c67x00_ep_data_alloc(struct c67x00_hcd *c67x00, struct urb *urb) in c67x00_ep_data_alloc() argument
237 c67x00->current_frame = c67x00_get_current_frame_number(c67x00); in c67x00_ep_data_alloc()
242 if (frame_after(c67x00->current_frame, ep_data->next_frame)) in c67x00_ep_data_alloc()
244 frame_add(c67x00->current_frame, 1); in c67x00_ep_data_alloc()
263 ep_data->next_frame = frame_add(c67x00->current_frame, 1); in c67x00_ep_data_alloc()
269 list_add(&ep_data->node, &c67x00->list[type]); in c67x00_ep_data_alloc()
273 list_for_each_entry(prev, &c67x00->list[type], node) { in c67x00_ep_data_alloc()
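
Note: endpoint data lives on a per-transfer-type list (list[type]); the hits on lines 269-273 show a head insert followed by an ordered walk over prev entries, i.e. the list is maintained in sorted order. A sketch of sorted insertion into a singly linked list; dev_addr is a hypothetical ordering key, standing in for whatever field the driver actually compares while walking prev:

    #include <stdio.h>

    struct ep_node {
        int dev_addr;               /* hypothetical sort key */
        struct ep_node *next;
    };

    static void insert_sorted(struct ep_node **head, struct ep_node *n)
    {
        struct ep_node **pp = head;

        while (*pp && (*pp)->dev_addr <= n->dev_addr)
            pp = &(*pp)->next;
        n->next = *pp;
        *pp = n;
    }

    int main(void)
    {
        struct ep_node a = { 3, NULL }, b = { 1, NULL }, c = { 2, NULL };
        struct ep_node *head = NULL;

        insert_sorted(&head, &a);
        insert_sorted(&head, &b);
        insert_sorted(&head, &c);
        for (struct ep_node *it = head; it; it = it->next)
            printf("%d\n", it->dev_addr);   /* prints 1 2 3 */
        return 0;
    }
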
307 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd); in c67x00_endpoint_disable() local
311 dev_warn(c67x00_hcd_dev(c67x00), "error: urb list not empty\n"); in c67x00_endpoint_disable()
313 spin_lock_irqsave(&c67x00->lock, flags); in c67x00_endpoint_disable()
318 spin_unlock_irqrestore(&c67x00->lock, flags); in c67x00_endpoint_disable()
323 reinit_completion(&c67x00->endpoint_disable); in c67x00_endpoint_disable()
324 c67x00_sched_kick(c67x00); in c67x00_endpoint_disable()
325 wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ); in c67x00_endpoint_disable()
327 spin_lock_irqsave(&c67x00->lock, flags); in c67x00_endpoint_disable()
330 spin_unlock_irqrestore(&c67x00->lock, flags); in c67x00_endpoint_disable()
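
Note: c67x00_endpoint_disable() synchronizes with the scheduler through a completion: re-arm it (line 323), kick the worker (line 324), then sleep up to one second until the worker signals that the endpoint's TDs have drained (line 325). A userspace model of the same handshake, substituting a pthread condition variable for the kernel's struct completion (the 1*HZ timeout is omitted for brevity):

    #include <pthread.h>
    #include <stdio.h>

    struct completion_model {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
    };

    static void complete_model(struct completion_model *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion_model(struct completion_model *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    static void *worker(void *arg)
    {
        /* ...drain the endpoint's TDs here... */
        complete_model(arg);
        return NULL;
    }

    int main(void)
    {
        struct completion_model c = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
        };
        pthread_t t;

        pthread_create(&t, NULL, worker, &c);   /* "kick" the scheduler */
        wait_for_completion_model(&c);          /* block until drained */
        pthread_join(&t, NULL);
        puts("endpoint drained");
        return 0;
    }
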
348 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd); in c67x00_urb_enqueue() local
358 spin_lock_irqsave(&c67x00->lock, flags); in c67x00_urb_enqueue()
374 urbp->ep_data = c67x00_ep_data_alloc(c67x00, urb); in c67x00_urb_enqueue()
397 if (c67x00->urb_iso_count == 0) in c67x00_urb_enqueue()
398 c67x00->max_frame_bw = MAX_FRAME_BW_ISO; in c67x00_urb_enqueue()
399 c67x00->urb_iso_count++; in c67x00_urb_enqueue()
423 if (!c67x00->urb_count++) in c67x00_urb_enqueue()
424 c67x00_ll_hpi_enable_sofeop(c67x00->sie); in c67x00_urb_enqueue()
426 c67x00_sched_kick(c67x00); in c67x00_urb_enqueue()
427 spin_unlock_irqrestore(&c67x00->lock, flags); in c67x00_urb_enqueue()
434 spin_unlock_irqrestore(&c67x00->lock, flags); in c67x00_urb_enqueue()
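
Note: lines 423-424 enable SOF/EOP interrupts only when the first URB arrives, and line 1111 disables them again once no URBs remain, so an idle bus generates no per-frame interrupt load. A sketch of the pattern; the boolean stands in for the c67x00_ll_hpi_enable_sofeop()/c67x00_ll_hpi_disable_sofeop() calls:

    #include <assert.h>
    #include <stdbool.h>

    struct sched_model {
        int urb_count;
        bool sofeop_enabled;
    };

    static void enqueue(struct sched_model *s)
    {
        if (!s->urb_count++)
            s->sofeop_enabled = true;    /* first URB: start frame irqs */
    }

    static void idle_check(struct sched_model *s)
    {
        if (!s->urb_count)
            s->sofeop_enabled = false;   /* no work left: go quiet */
    }

    int main(void)
    {
        struct sched_model s = { 0, false };

        enqueue(&s);
        assert(s.sofeop_enabled);
        s.urb_count--;
        idle_check(&s);
        assert(!s.sofeop_enabled);
        return 0;
    }
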
443 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd); in c67x00_urb_dequeue() local
447 spin_lock_irqsave(&c67x00->lock, flags); in c67x00_urb_dequeue()
452 c67x00_release_urb(c67x00, urb); in c67x00_urb_dequeue()
455 spin_unlock(&c67x00->lock); in c67x00_urb_dequeue()
457 spin_lock(&c67x00->lock); in c67x00_urb_dequeue()
459 spin_unlock_irqrestore(&c67x00->lock, flags); in c67x00_urb_dequeue()
464 spin_unlock_irqrestore(&c67x00->lock, flags); in c67x00_urb_dequeue()
474 c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status) in c67x00_giveback_urb() argument
486 c67x00_release_urb(c67x00, urb); in c67x00_giveback_urb()
487 usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb); in c67x00_giveback_urb()
488 spin_unlock(&c67x00->lock); in c67x00_giveback_urb()
489 usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status); in c67x00_giveback_urb()
490 spin_lock(&c67x00->lock); in c67x00_giveback_urb()
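
Note: usb_hcd_giveback_urb() runs the URB's completion handler, and that handler may call back into the HCD (for instance to resubmit an URB), so the driver must drop its spinlock around the call (lines 488-490); c67x00_urb_dequeue brackets its giveback the same way (lines 455-457). A userspace sketch of why holding a non-recursive lock across the callback would deadlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t hcd_lock = PTHREAD_MUTEX_INITIALIZER;

    static void resubmitting_callback(void)
    {
        pthread_mutex_lock(&hcd_lock);   /* would deadlock if still held */
        /* ...enqueue a new transfer... */
        pthread_mutex_unlock(&hcd_lock);
    }

    static void giveback(void)
    {
        pthread_mutex_lock(&hcd_lock);
        /* ...unlink the URB from its endpoint under the lock... */
        pthread_mutex_unlock(&hcd_lock);  /* drop before the callback */
        resubmitting_callback();
        pthread_mutex_lock(&hcd_lock);    /* retake to keep scanning */
        pthread_mutex_unlock(&hcd_lock);
    }

    int main(void)
    {
        giveback();
        puts("no deadlock");
        return 0;
    }
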
495 static int c67x00_claim_frame_bw(struct c67x00_hcd *c67x00, struct urb *urb, in c67x00_claim_frame_bw() argument
535 if (unlikely(bit_time + c67x00->bandwidth_allocated >= in c67x00_claim_frame_bw()
536 c67x00->max_frame_bw)) in c67x00_claim_frame_bw()
539 if (unlikely(c67x00->next_td_addr + CY_TD_SIZE >= in c67x00_claim_frame_bw()
540 c67x00->td_base_addr + SIE_TD_SIZE)) in c67x00_claim_frame_bw()
543 if (unlikely(c67x00->next_buf_addr + len >= in c67x00_claim_frame_bw()
544 c67x00->buf_base_addr + SIE_TD_BUF_SIZE)) in c67x00_claim_frame_bw()
548 if (unlikely(bit_time + c67x00->periodic_bw_allocated >= in c67x00_claim_frame_bw()
549 MAX_PERIODIC_BW(c67x00->max_frame_bw))) in c67x00_claim_frame_bw()
551 c67x00->periodic_bw_allocated += bit_time; in c67x00_claim_frame_bw()
554 c67x00->bandwidth_allocated += bit_time; in c67x00_claim_frame_bw()
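
Note: c67x00_claim_frame_bw() admits a TD into the current frame only if four checks pass: the transfer's bit time fits the remaining frame bandwidth (line 535), TD memory (line 539) and data-buffer memory (line 543) remain in the SIE, and periodic transfers stay under their reserved share (line 548). A compilable model of the same admission logic; every constant here is a placeholder, including the assumed 90% periodic cap:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_FRAME_BW        12000
    #define MAX_PERIODIC_BW(bw) ((bw) * 90 / 100)   /* assumed cap */
    #define TD_SIZE             8
    #define TD_MEM_SIZE         512
    #define BUF_MEM_SIZE        4096

    struct frame_model {
        uint32_t bw_allocated, periodic_bw_allocated;
        uint32_t td_used, buf_used;
    };

    static bool claim(struct frame_model *f, uint32_t bit_time,
                      uint32_t len, bool periodic)
    {
        if (bit_time + f->bw_allocated >= MAX_FRAME_BW)
            return false;                 /* frame bandwidth exhausted */
        if (f->td_used + TD_SIZE >= TD_MEM_SIZE)
            return false;                 /* out of TD memory */
        if (f->buf_used + len >= BUF_MEM_SIZE)
            return false;                 /* out of buffer memory */
        if (periodic) {
            if (bit_time + f->periodic_bw_allocated >=
                MAX_PERIODIC_BW(MAX_FRAME_BW))
                return false;             /* periodic share exceeded */
            f->periodic_bw_allocated += bit_time;
        }
        f->bw_allocated += bit_time;
        f->td_used += TD_SIZE;
        f->buf_used += len;
        return true;
    }

    int main(void)
    {
        struct frame_model f = { 0 };

        assert(claim(&f, 1000, 64, true));
        assert(!claim(&f, MAX_FRAME_BW, 64, false));  /* over budget */
        return 0;
    }
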
563 static int c67x00_create_td(struct c67x00_hcd *c67x00, struct urb *urb, in c67x00_create_td() argument
573 if (c67x00_claim_frame_bw(c67x00, urb, len, usb_pipeisoc(urb->pipe) in c67x00_create_td()
585 !(c67x00->low_speed_ports & (1 << urbp->port))) in c67x00_create_td()
610 td->td_addr = c67x00->next_td_addr; in c67x00_create_td()
611 c67x00->next_td_addr = c67x00->next_td_addr + CY_TD_SIZE; in c67x00_create_td()
614 td->ly_base_addr = __cpu_to_le16(c67x00->next_buf_addr); in c67x00_create_td()
615 td->port_length = __cpu_to_le16((c67x00->sie->sie_num << 15) | in c67x00_create_td()
624 td->next_td_addr = __cpu_to_le16(c67x00->next_td_addr); in c67x00_create_td()
631 c67x00->next_buf_addr += (len + 1) & ~0x01; /* properly align */ in c67x00_create_td()
633 list_add_tail(&td->td_list, &c67x00->td_list); in c67x00_create_td()
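
Note: c67x00_create_td() carves both a TD slot and a data buffer out of SIE memory: td_addr advances by CY_TD_SIZE (line 611) and the next_td_addr field chains the TDs together (line 624), while the buffer pointer advances by the length rounded up to an even value (line 631), the SIE memory being addressed in 16-bit words. The rounding expression in isolation:

    #include <assert.h>
    #include <stdint.h>

    /* (len + 1) & ~0x01 rounds up to the next even length. */
    static uint16_t align2(uint16_t len)
    {
        return (len + 1) & ~0x01;
    }

    int main(void)
    {
        assert(align2(0) == 0);
        assert(align2(1) == 2);
        assert(align2(63) == 64);
        assert(align2(64) == 64);
        return 0;
    }
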
645 static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb) in c67x00_add_data_urb() argument
674 ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, toggle, in c67x00_add_data_urb()
691 static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb) in c67x00_add_ctrl_urb() argument
699 ret = c67x00_create_td(c67x00, urb, urb->setup_packet, in c67x00_add_ctrl_urb()
709 ret = c67x00_add_data_urb(c67x00, urb); in c67x00_add_ctrl_urb()
717 ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1, in c67x00_add_ctrl_urb()
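
Note: c67x00_add_ctrl_urb() builds a control transfer in up to three stages: a SETUP TD carrying the 8-byte setup packet (line 699), optional data-stage TDs (line 709), and a zero-length status TD whose data toggle is forced to 1 (the literal on line 717), matching the USB rule that the status stage is always DATA1 and runs opposite to the data direction. A sketch of the resulting TD sequence for a control-IN transfer; the types are illustrative:

    #include <stdio.h>

    enum pid { PID_SETUP, PID_IN, PID_OUT };

    struct td_desc { enum pid pid; int len; int toggle; };

    static int build_ctrl_in(struct td_desc *out, int data_len)
    {
        int n = 0;

        out[n++] = (struct td_desc){ PID_SETUP, 8, 0 };  /* SETUP: DATA0 */
        if (data_len)
            out[n++] = (struct td_desc){ PID_IN, data_len, 1 };
        out[n++] = (struct td_desc){ PID_OUT, 0, 1 };    /* STATUS: DATA1 */
        return n;
    }

    int main(void)
    {
        struct td_desc tds[3];
        int n = build_ctrl_in(tds, 64);

        for (int i = 0; i < n; i++)
            printf("pid=%d len=%d toggle=%d\n",
                   tds[i].pid, tds[i].len, tds[i].toggle);
        return 0;
    }
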
730 static int c67x00_add_int_urb(struct c67x00_hcd *c67x00, struct urb *urb) in c67x00_add_int_urb() argument
734 if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) { in c67x00_add_int_urb()
737 return c67x00_add_data_urb(c67x00, urb); in c67x00_add_int_urb()
742 static int c67x00_add_iso_urb(struct c67x00_hcd *c67x00, struct urb *urb) in c67x00_add_iso_urb() argument
746 if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) { in c67x00_add_iso_urb()
757 ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0, in c67x00_add_iso_urb()
760 dev_dbg(c67x00_hcd_dev(c67x00), "create failed: %d\n", in c67x00_add_iso_urb()
765 c67x00_giveback_urb(c67x00, urb, 0); in c67x00_add_iso_urb()
777 static void c67x00_fill_from_list(struct c67x00_hcd *c67x00, int type, in c67x00_fill_from_list() argument
784 list_for_each_entry(ep_data, &c67x00->list[type], node) { in c67x00_fill_from_list()
791 add(c67x00, urb); in c67x00_fill_from_list()
796 static void c67x00_fill_frame(struct c67x00_hcd *c67x00) in c67x00_fill_frame() argument
801 if (!list_empty(&c67x00->td_list)) { in c67x00_fill_frame()
802 dev_warn(c67x00_hcd_dev(c67x00), in c67x00_fill_frame()
804 list_for_each_entry_safe(td, ttd, &c67x00->td_list, td_list) { in c67x00_fill_frame()
805 dbg_td(c67x00, td, "Unprocessed td"); in c67x00_fill_frame()
811 c67x00->bandwidth_allocated = 0; in c67x00_fill_frame()
812 c67x00->periodic_bw_allocated = 0; in c67x00_fill_frame()
814 c67x00->next_td_addr = c67x00->td_base_addr; in c67x00_fill_frame()
815 c67x00->next_buf_addr = c67x00->buf_base_addr; in c67x00_fill_frame()
818 c67x00_fill_from_list(c67x00, PIPE_ISOCHRONOUS, c67x00_add_iso_urb); in c67x00_fill_frame()
819 c67x00_fill_from_list(c67x00, PIPE_INTERRUPT, c67x00_add_int_urb); in c67x00_fill_frame()
820 c67x00_fill_from_list(c67x00, PIPE_CONTROL, c67x00_add_ctrl_urb); in c67x00_fill_frame()
821 c67x00_fill_from_list(c67x00, PIPE_BULK, c67x00_add_data_urb); in c67x00_fill_frame()
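
Note: lines 818-821 fill each frame in fixed priority order, so periodic traffic (isochronous, then interrupt) claims bandwidth first and control/bulk absorb what is left; c67x00_fill_from_list() receives the per-type add function as a callback (invoked on line 791). A sketch of the same callback-table dispatch:

    #include <stdio.h>

    typedef void (*fill_fn)(void);

    static void add_iso(void)  { puts("iso");  }
    static void add_int(void)  { puts("int");  }
    static void add_ctrl(void) { puts("ctrl"); }
    static void add_bulk(void) { puts("bulk"); }

    int main(void)
    {
        /* Mirrors lines 818-821: iso, then interrupt, control, bulk. */
        static const fill_fn order[] = {
            add_iso, add_int, add_ctrl, add_bulk
        };

        for (unsigned i = 0; i < sizeof(order) / sizeof(order[0]); i++)
            order[i]();
        return 0;
    }
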
830 c67x00_parse_td(struct c67x00_hcd *c67x00, struct c67x00_td *td) in c67x00_parse_td() argument
832 c67x00_ll_read_mem_le16(c67x00->sie->dev, in c67x00_parse_td()
836 c67x00_ll_read_mem_le16(c67x00->sie->dev, td_ly_base_addr(td), in c67x00_parse_td()
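
Note: TD fields are stored with __cpu_to_le16() (lines 614-624) and read back with c67x00_ll_read_mem_le16() (lines 832-836): the SIE's shared memory is little-endian regardless of host byte order. Portable userspace equivalents of that 16-bit serialization:

    #include <assert.h>
    #include <stdint.h>

    static void put_le16(uint8_t *p, uint16_t v)
    {
        p[0] = v & 0xff;          /* low byte first */
        p[1] = v >> 8;
    }

    static uint16_t get_le16(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
        uint8_t buf[2];

        put_le16(buf, 0x1234);
        assert(buf[0] == 0x34 && buf[1] == 0x12);
        assert(get_le16(buf) == 0x1234);
        return 0;
    }
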
840 static int c67x00_td_to_error(struct c67x00_hcd *c67x00, struct c67x00_td *td) in c67x00_td_to_error() argument
843 dbg_td(c67x00, td, "ERROR_FLAG"); in c67x00_td_to_error()
851 dbg_td(c67x00, td, "TIMEOUT"); in c67x00_td_to_error()
889 static inline void c67x00_clear_pipe(struct c67x00_hcd *c67x00, in c67x00_clear_pipe() argument
895 while (td->td_list.next != &c67x00->td_list) { in c67x00_clear_pipe()
907 static void c67x00_handle_successful_td(struct c67x00_hcd *c67x00, in c67x00_handle_successful_td() argument
932 c67x00_clear_pipe(c67x00, td); in c67x00_handle_successful_td()
938 c67x00_giveback_urb(c67x00, urb, 0); in c67x00_handle_successful_td()
946 c67x00_clear_pipe(c67x00, td); in c67x00_handle_successful_td()
947 c67x00_giveback_urb(c67x00, urb, 0); in c67x00_handle_successful_td()
953 static void c67x00_handle_isoc(struct c67x00_hcd *c67x00, struct c67x00_td *td) in c67x00_handle_isoc() argument
967 urb->iso_frame_desc[cnt].status = c67x00_td_to_error(c67x00, td); in c67x00_handle_isoc()
969 c67x00_giveback_urb(c67x00, urb, 0); in c67x00_handle_isoc()
978 static inline void c67x00_check_td_list(struct c67x00_hcd *c67x00) in c67x00_check_td_list() argument
985 list_for_each_entry_safe(td, tmp, &c67x00->td_list, td_list) { in c67x00_check_td_list()
987 c67x00_parse_td(c67x00, td); in c67x00_check_td_list()
995 c67x00_handle_isoc(c67x00, td); in c67x00_check_td_list()
1003 c67x00_giveback_urb(c67x00, urb, in c67x00_check_td_list()
1004 c67x00_td_to_error(c67x00, td)); in c67x00_check_td_list()
1018 c67x00_giveback_urb(c67x00, urb, -EOVERFLOW); in c67x00_check_td_list()
1024 c67x00_handle_successful_td(c67x00, td); in c67x00_check_td_list()
1028 c67x00_clear_pipe(c67x00, td); in c67x00_check_td_list()
1041 static inline int c67x00_all_tds_processed(struct c67x00_hcd *c67x00) in c67x00_all_tds_processed() argument
1046 return !c67x00_ll_husb_get_current_td(c67x00->sie); in c67x00_all_tds_processed()
1052 static void c67x00_send_td(struct c67x00_hcd *c67x00, struct c67x00_td *td) in c67x00_send_td() argument
1057 c67x00_ll_write_mem_le16(c67x00->sie->dev, td_ly_base_addr(td), in c67x00_send_td()
1060 c67x00_ll_write_mem_le16(c67x00->sie->dev, in c67x00_send_td()
1064 static void c67x00_send_frame(struct c67x00_hcd *c67x00) in c67x00_send_frame() argument
1068 if (list_empty(&c67x00->td_list)) in c67x00_send_frame()
1069 dev_warn(c67x00_hcd_dev(c67x00), in c67x00_send_frame()
1073 list_for_each_entry(td, &c67x00->td_list, td_list) { in c67x00_send_frame()
1074 if (td->td_list.next == &c67x00->td_list) in c67x00_send_frame()
1077 c67x00_send_td(c67x00, td); in c67x00_send_frame()
1080 c67x00_ll_husb_set_current_td(c67x00->sie, c67x00->td_base_addr); in c67x00_send_frame()
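
Note: kernel struct list_head lists are circular with a sentinel head, so an element whose ->next points back at the head is the last one; line 1074 uses this to detect the final TD in the chain before the frame is handed over, and line 1080 then points the hardware at td_base_addr to start executing it. A minimal model of the tail test:

    #include <stdio.h>

    /* Stripped-down list_head; the kernel's container_of()/list_entry()
     * machinery is omitted. */
    struct list_head {
        struct list_head *next, *prev;
    };

    static void list_add_tail_model(struct list_head *n,
                                    struct list_head *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    int main(void)
    {
        struct list_head head = { &head, &head };
        struct list_head a, b;

        list_add_tail_model(&a, &head);
        list_add_tail_model(&b, &head);

        for (struct list_head *p = head.next; p != &head; p = p->next)
            if (p->next == &head)
                puts("last td found");   /* fires for 'b' only */
        return 0;
    }
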
1088 static void c67x00_do_work(struct c67x00_hcd *c67x00) in c67x00_do_work() argument
1090 spin_lock(&c67x00->lock); in c67x00_do_work()
1092 if (!c67x00_all_tds_processed(c67x00)) in c67x00_do_work()
1095 c67x00_check_td_list(c67x00); in c67x00_do_work()
1099 complete(&c67x00->endpoint_disable); in c67x00_do_work()
1101 if (!list_empty(&c67x00->td_list)) in c67x00_do_work()
1104 c67x00->current_frame = c67x00_get_current_frame_number(c67x00); in c67x00_do_work()
1105 if (c67x00->current_frame == c67x00->last_frame) in c67x00_do_work()
1107 c67x00->last_frame = c67x00->current_frame; in c67x00_do_work()
1110 if (!c67x00->urb_count) { in c67x00_do_work()
1111 c67x00_ll_hpi_disable_sofeop(c67x00->sie); in c67x00_do_work()
1115 c67x00_fill_frame(c67x00); in c67x00_do_work()
1116 if (!list_empty(&c67x00->td_list)) in c67x00_do_work()
1118 c67x00_send_frame(c67x00); in c67x00_do_work()
1121 spin_unlock(&c67x00->lock); in c67x00_do_work()
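
Note: c67x00_do_work() is a chain of early-outs under the lock: bail if the SIE is still executing TDs (line 1092), reap finished TDs, signal endpoint_disable waiters (line 1099), bail if reaped TDs remain queued (line 1101), bail if the frame number has not advanced (lines 1105-1107, at most one fill per frame), switch off SOF/EOP interrupts and bail if no URBs are pending (lines 1110-1111); only then is the next frame filled and sent. The same control flow as guard clauses, with illustrative state fields:

    #include <stdbool.h>
    #include <stdio.h>

    struct work_model {
        bool hw_busy;              /* SIE still executing TDs */
        bool td_list_nonempty;     /* previous frame not fully reaped */
        int current_frame, last_frame;
        int urb_count;
    };

    static void do_work(struct work_model *w)
    {
        if (w->hw_busy)
            return;                /* TDs still in flight */
        /* ...reap finished TDs here (c67x00_check_td_list)... */
        if (w->td_list_nonempty)
            return;
        if (w->current_frame == w->last_frame)
            return;                /* only one fill per frame */
        w->last_frame = w->current_frame;
        if (!w->urb_count)
            return;                /* idle: SOF/EOP irqs get disabled */
        puts("fill and send frame");
    }

    int main(void)
    {
        struct work_model w = { false, false, 5, 4, 1 };

        do_work(&w);               /* prints: fill and send frame */
        return 0;
    }
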
1128 struct c67x00_hcd *c67x00; in c67x00_sched_work() local
1130 c67x00 = container_of(work, struct c67x00_hcd, work); in c67x00_sched_work()
1131 c67x00_do_work(c67x00); in c67x00_sched_work()
1134 void c67x00_sched_kick(struct c67x00_hcd *c67x00) in c67x00_sched_kick() argument
1136 queue_work(system_highpri_wq, &c67x00->work); in c67x00_sched_kick()
1139 int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00) in c67x00_sched_start_scheduler() argument
1141 INIT_WORK(&c67x00->work, c67x00_sched_work); in c67x00_sched_start_scheduler()
1145 void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00) in c67x00_sched_stop_scheduler() argument
1147 cancel_work_sync(&c67x00->work); in c67x00_sched_stop_scheduler()
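
Note: the scheduler runs as a work item: c67x00_sched_kick() queues it on system_highpri_wq (line 1136), and queue_work() is a no-op while the item is already pending, so repeated kicks within a frame collapse into a single c67x00_do_work() pass; cancel_work_sync() on stop (line 1147) guarantees the handler is no longer running after teardown. A pthread model of that kick/worker pairing:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static bool pending, stopping;

    static void sched_kick(void)
    {
        pthread_mutex_lock(&m);
        pending = true;            /* repeated kicks collapse into one */
        pthread_cond_signal(&cv);
        pthread_mutex_unlock(&m);
    }

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&m);
        while (!stopping) {
            while (!pending && !stopping)
                pthread_cond_wait(&cv, &m);
            if (pending) {
                pending = false;
                pthread_mutex_unlock(&m);
                puts("do_work");   /* one pass per batch of kicks */
                pthread_mutex_lock(&m);
            }
        }
        pthread_mutex_unlock(&m);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        sched_kick();
        sched_kick();              /* may merge with the first kick */
        pthread_mutex_lock(&m);
        stopping = true;           /* like cancel_work_sync() + join */
        pthread_cond_signal(&cv);
        pthread_mutex_unlock(&m);
        pthread_join(&t, NULL);
        return 0;
    }
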