Lines Matching +full:hi +full:- +full:speed

1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
13 #include <linux/firewire-constants.h>
31 #include "packet-header-definitions.h"
32 #include "phy-packet-definitions.h"
41 if (t->is_split_transaction) in try_cancel_split_timeout()
42 return timer_delete(&t->split_timeout_timer); in try_cancel_split_timeout()
54 scoped_guard(spinlock_irqsave, &card->transactions.lock) { in close_transaction()
55 list_for_each_entry(iter, &card->transactions.list, link) { in close_transaction()
58 list_del_init(&iter->link); in close_transaction()
59 card->transactions.tlabel_mask &= ~(1ULL << iter->tlabel); in close_transaction()
68 return -ENOENT; in close_transaction()
70 if (!t->with_tstamp) { in close_transaction()
71 t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data); in close_transaction()
73 t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp, NULL, 0, in close_transaction()
74 t->callback_data); in close_transaction()
95 if (card->driver->cancel_packet(card, &transaction->packet) == 0) in fw_cancel_transaction()
103 if (transaction->packet.ack == 0) { in fw_cancel_transaction()
105 tstamp = transaction->packet.timestamp; in fw_cancel_transaction()
120 struct fw_card *card = t->card; in split_transaction_timeout_callback()
122 scoped_guard(spinlock_irqsave, &card->transactions.lock) { in split_transaction_timeout_callback()
123 if (list_empty(&t->link)) in split_transaction_timeout_callback()
125 list_del(&t->link); in split_transaction_timeout_callback()
126 card->transactions.tlabel_mask &= ~(1ULL << t->tlabel); in split_transaction_timeout_callback()
129 if (!t->with_tstamp) { in split_transaction_timeout_callback()
130 t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, t->callback_data); in split_transaction_timeout_callback()
132 t->callback.with_tstamp(card, RCODE_CANCELLED, t->packet.timestamp, in split_transaction_timeout_callback()
133 t->split_timeout_cycle, NULL, 0, t->callback_data); in split_transaction_timeout_callback()
142 if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) in start_split_transaction_timeout()
145 t->is_split_transaction = true; in start_split_transaction_timeout()
149 scoped_guard(spinlock_irqsave, &card->split_timeout.lock) in start_split_transaction_timeout()
150 delta = card->split_timeout.jiffies; in start_split_transaction_timeout()
151 mod_timer(&t->split_timeout_timer, jiffies + delta); in start_split_transaction_timeout()
162 trace_async_request_outbound_complete((uintptr_t)t, card->index, packet->generation, in transmit_complete_callback()
163 packet->speed, status, packet->timestamp); in transmit_complete_callback()
167 close_transaction(t, card, RCODE_COMPLETE, packet->timestamp); in transmit_complete_callback()
173 scoped_guard(spinlock_irqsave, &card->split_timeout.lock) { in transmit_complete_callback()
174 t->split_timeout_cycle = in transmit_complete_callback()
175 compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff; in transmit_complete_callback()
183 close_transaction(t, card, RCODE_BUSY, packet->timestamp); in transmit_complete_callback()
186 close_transaction(t, card, RCODE_DATA_ERROR, packet->timestamp); in transmit_complete_callback()
189 close_transaction(t, card, RCODE_TYPE_ERROR, packet->timestamp); in transmit_complete_callback()
196 close_transaction(t, card, status, packet->timestamp); in transmit_complete_callback()
202 int destination_id, int source_id, int generation, int speed, in fw_fill_request() argument
210 packet->header[0] = destination_id; in fw_fill_request()
211 isoc_header_set_data_length(packet->header, length); in fw_fill_request()
212 isoc_header_set_tcode(packet->header, TCODE_STREAM_DATA); in fw_fill_request()
213 packet->header_length = 4; in fw_fill_request()
214 packet->payload = payload; in fw_fill_request()
215 packet->payload_length = length; in fw_fill_request()
226 async_header_set_retry(packet->header, RETRY_X); in fw_fill_request()
227 async_header_set_tlabel(packet->header, tlabel); in fw_fill_request()
228 async_header_set_tcode(packet->header, tcode); in fw_fill_request()
229 async_header_set_destination(packet->header, destination_id); in fw_fill_request()
230 async_header_set_source(packet->header, source_id); in fw_fill_request()
231 async_header_set_offset(packet->header, offset); in fw_fill_request()
235 async_header_set_quadlet_data(packet->header, *(u32 *)payload); in fw_fill_request()
236 packet->header_length = 16; in fw_fill_request()
237 packet->payload_length = 0; in fw_fill_request()
242 async_header_set_data_length(packet->header, length); in fw_fill_request()
243 async_header_set_extended_tcode(packet->header, ext_tcode); in fw_fill_request()
244 packet->header_length = 16; in fw_fill_request()
245 packet->payload = payload; in fw_fill_request()
246 packet->payload_length = length; in fw_fill_request()
250 packet->header_length = 12; in fw_fill_request()
251 packet->payload_length = 0; in fw_fill_request()
255 async_header_set_data_length(packet->header, length); in fw_fill_request()
256 async_header_set_extended_tcode(packet->header, ext_tcode); in fw_fill_request()
257 packet->header_length = 16; in fw_fill_request()
258 packet->payload_length = 0; in fw_fill_request()
265 packet->speed = speed; in fw_fill_request()
266 packet->generation = generation; in fw_fill_request()
267 packet->ack = 0; in fw_fill_request()
268 packet->payload_mapped = false; in fw_fill_request()
272 __must_hold(&card->transactions.lock) in allocate_tlabel()
276 lockdep_assert_held(&card->transactions.lock); in allocate_tlabel()
278 tlabel = card->transactions.current_tlabel; in allocate_tlabel()
279 while (card->transactions.tlabel_mask & (1ULL << tlabel)) { in allocate_tlabel()
281 if (tlabel == card->transactions.current_tlabel) in allocate_tlabel()
282 return -EBUSY; in allocate_tlabel()
285 card->transactions.current_tlabel = (tlabel + 1) & 0x3f; in allocate_tlabel()
286 card->transactions.tlabel_mask |= 1ULL << tlabel; in allocate_tlabel()
292 * __fw_send_request() - submit a request packet for transmission and generate a callback for the response
299 * @speed: transmission speed
312 * In case of lock requests, specify one of the firewire-core specific %TCODE_
322 * The payload buffer at @data is going to be DMA-mapped except in case of
332 * the firewire-core specific %RCODE_SEND_ERROR. The other firewire-core
343 int destination_id, int generation, int speed, unsigned long long offset, in __fw_send_request() argument
356 scoped_guard(spinlock_irqsave, &card->transactions.lock) in __fw_send_request()
375 t->node_id = destination_id; in __fw_send_request()
376 t->tlabel = tlabel; in __fw_send_request()
377 t->card = card; in __fw_send_request()
378 t->is_split_transaction = false; in __fw_send_request()
379 timer_setup(&t->split_timeout_timer, split_transaction_timeout_callback, 0); in __fw_send_request()
380 t->callback = callback; in __fw_send_request()
381 t->with_tstamp = with_tstamp; in __fw_send_request()
382 t->callback_data = callback_data; in __fw_send_request()
383 t->packet.callback = transmit_complete_callback; in __fw_send_request()
387 scoped_guard(spinlock_irqsave, &card->lock) { in __fw_send_request()
389 fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id, in __fw_send_request()
390 generation, speed, offset, payload, length); in __fw_send_request()
395 scoped_guard(spinlock_irqsave, &card->transactions.lock) in __fw_send_request()
396 list_add_tail(&t->link, &card->transactions.list); in __fw_send_request()
399 trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed, in __fw_send_request()
400 t->packet.header, payload, in __fw_send_request()
403 card->driver->send_request(card, &t->packet); in __fw_send_request()
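
The kernel-doc fragments above describe the asynchronous path: the caller supplies a completion callback and the core reports the outcome as an rcode. Below is a minimal caller sketch, assuming a hypothetical driver that already holds a struct fw_device and keeps the struct fw_transaction alive until the callback has run; my_read_done, my_start_read and the CSR_CYCLE_TIME target address are illustrative, not part of the core API.

#include <linux/firewire.h>
#include <linux/firewire-constants.h>

// Completion callback: payload points at the response data, here one quadlet.
static void my_read_done(struct fw_card *card, int rcode,
			 void *payload, size_t length, void *callback_data)
{
	__be32 *result = callback_data;

	if (rcode == RCODE_COMPLETE)
		*result = *(__be32 *)payload;	// quadlet arrives in big-endian wire order
	else
		pr_info("read failed: %s\n", fw_rcode_string(rcode));
}

// The transaction object must stay allocated until my_read_done() has run.
static void my_start_read(struct fw_device *device, struct fw_transaction *t,
			  __be32 *result)
{
	fw_send_request(device->card, t, TCODE_READ_QUADLET_REQUEST,
			device->node_id, device->generation, device->max_speed,
			CSR_REGISTER_BASE + CSR_CYCLE_TIME,	// example target address
			NULL, 4, my_read_done, result);
}
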
419 memcpy(d->payload, payload, length); in transaction_callback()
420 d->rcode = rcode; in transaction_callback()
421 complete(&d->done); in transaction_callback()
425 * fw_run_transaction() - send request and sleep until transaction is completed
430 * @speed: transmission speed
441 int generation, int speed, unsigned long long offset, in fw_run_transaction() argument
450 fw_send_request(card, &t, tcode, destination_id, generation, speed, in fw_run_transaction()
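
fw_run_transaction() wraps the asynchronous call in a completion and sleeps until the response (or an error) arrives, so it may only be used from process context. A minimal synchronous quadlet read sketch, assuming a hypothetical my_read_quadlet() helper and a struct fw_device the caller already owns:

#include <linux/firewire.h>
#include <linux/firewire-constants.h>

static int my_read_quadlet(struct fw_device *device, u64 offset, u32 *value)
{
	__be32 data;
	int rcode;

	// Sleeps until the split transaction completes or times out.
	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
				   device->node_id, device->generation,
				   device->max_speed, offset, &data, sizeof(data));
	if (rcode != RCODE_COMPLETE)
		return -EIO;

	*value = be32_to_cpu(data);
	return 0;
}
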
465 trace_async_phy_outbound_complete((uintptr_t)packet, card->index, packet->generation, status, in transmit_phy_packet_callback()
466 packet->timestamp); in transmit_phy_packet_callback()
473 .speed = SCODE_100,
491 gap_count = card->driver->read_phy_reg(card, 1); in fw_send_phy_config()
510 trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, card->index, in fw_send_phy_config()
514 card->driver->send_request(card, &phy_config_packet); in fw_send_phy_config()
524 if (handler->offset < offset + length && in lookup_overlapping_address_handler()
525 offset < handler->offset + handler->length) in lookup_overlapping_address_handler()
535 return handler->offset <= offset && in is_enclosing_handler()
536 offset + length <= handler->offset + handler->length; in is_enclosing_handler()
576 complete(&handler->done); in complete_address_handler()
581 kref_get(&handler->kref); in get_address_handler()
586 return kref_put(&handler->kref, complete_address_handler); in put_address_handler()
590 * fw_core_add_address_handler() - register for incoming requests
594 * region->start, ->end, and handler->length have to be quadlet-aligned.
602 * Return value: 0 on success, non-zero otherwise.
605 * fw_core_add_address_handler() and is returned in handler->offset.
613 int ret = -EBUSY; in fw_core_add_address_handler()
615 if (region->start & 0xffff000000000003ULL || in fw_core_add_address_handler()
616 region->start >= region->end || in fw_core_add_address_handler()
617 region->end > 0x0001000000000000ULL || in fw_core_add_address_handler()
618 handler->length & 3 || in fw_core_add_address_handler()
619 handler->length == 0) in fw_core_add_address_handler()
620 return -EINVAL; in fw_core_add_address_handler()
624 handler->offset = region->start; in fw_core_add_address_handler()
625 while (handler->offset + handler->length <= region->end) { in fw_core_add_address_handler()
626 if (is_in_fcp_region(handler->offset, handler->length)) in fw_core_add_address_handler()
631 handler->offset, handler->length); in fw_core_add_address_handler()
633 handler->offset += other->length; in fw_core_add_address_handler()
635 init_completion(&handler->done); in fw_core_add_address_handler()
636 kref_init(&handler->kref); in fw_core_add_address_handler()
637 list_add_tail_rcu(&handler->link, &address_handler_list); in fw_core_add_address_handler()
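
A minimal registration sketch follows; my_address_callback, my_handler and the 0x100 length are hypothetical driver choices, while fw_high_memory_region is one of the address regions exported by firewire-core. Serving read requests from such a callback is shown in the sketch after fw_send_response() below.

#include <linux/firewire.h>
#include <linux/firewire-constants.h>

static void my_address_callback(struct fw_card *card, struct fw_request *request,
				int tcode, int destination, int source, int generation,
				unsigned long long offset, void *data, size_t length,
				void *callback_data)
{
	// This sketch accepts quadlet writes and discards the data; everything
	// else is rejected with an rcode describing the error.
	if (tcode == TCODE_WRITE_QUADLET_REQUEST)
		fw_send_response(card, request, RCODE_COMPLETE);
	else
		fw_send_response(card, request, RCODE_TYPE_ERROR);
}

static struct fw_address_handler my_handler = {
	.length			= 0x100,	// quadlet-aligned, as required above
	.address_callback	= my_address_callback,
};

static int my_register(void)
{
	// On success, the allocated start address is returned in my_handler.offset.
	return fw_core_add_address_handler(&my_handler, &fw_high_memory_region);
}

static void my_unregister(void)
{
	// After this returns, my_address_callback() is guaranteed not to run anymore.
	fw_core_remove_address_handler(&my_handler);
}
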
648 * fw_core_remove_address_handler() - unregister an address handler
653 * When fw_core_remove_address_handler() returns, @handler->address_callback() is
659 list_del_rcu(&handler->link); in fw_core_remove_address_handler()
664 wait_for_completion(&handler->done); in fw_core_remove_address_handler()
680 kref_get(&request->kref); in fw_request_get()
692 kref_put(&request->kref, release_request); in fw_request_put()
700 trace_async_response_outbound_complete((uintptr_t)request, card->index, packet->generation, in free_response_callback()
701 packet->speed, status, packet->timestamp); in free_response_callback()
703 // Decrease the reference count since the response is no longer in flight. in free_response_callback()
714 tcode = async_header_get_tcode(r->request_header); in fw_get_response_length()
725 data_length = async_header_get_data_length(r->request_header); in fw_get_response_length()
729 ext_tcode = async_header_get_extended_tcode(r->request_header); in fw_get_response_length()
730 data_length = async_header_get_data_length(r->request_header); in fw_get_response_length()
756 async_header_set_retry(response->header, RETRY_1); in fw_fill_response()
757 async_header_set_tlabel(response->header, tlabel); in fw_fill_response()
758 async_header_set_destination(response->header, destination); in fw_fill_response()
759 async_header_set_source(response->header, source); in fw_fill_response()
760 async_header_set_rcode(response->header, rcode); in fw_fill_response()
761 response->header[2] = 0; // The field is reserved. in fw_fill_response()
766 async_header_set_tcode(response->header, TCODE_WRITE_RESPONSE); in fw_fill_response()
767 response->header_length = 12; in fw_fill_response()
768 response->payload_length = 0; in fw_fill_response()
772 async_header_set_tcode(response->header, TCODE_READ_QUADLET_RESPONSE); in fw_fill_response()
774 async_header_set_quadlet_data(response->header, *(u32 *)payload); in fw_fill_response()
776 async_header_set_quadlet_data(response->header, 0); in fw_fill_response()
777 response->header_length = 16; in fw_fill_response()
778 response->payload_length = 0; in fw_fill_response()
783 async_header_set_tcode(response->header, tcode + 2); in fw_fill_response()
784 async_header_set_data_length(response->header, length); in fw_fill_response()
785 async_header_set_extended_tcode(response->header, extended_tcode); in fw_fill_response()
786 response->header_length = 16; in fw_fill_response()
787 response->payload = payload; in fw_fill_response()
788 response->payload_length = length; in fw_fill_response()
795 response->payload_mapped = false; in fw_fill_response()
801 __must_hold(&card->split_timeout.lock) in compute_split_timeout_timestamp()
806 lockdep_assert_held(&card->split_timeout.lock); in compute_split_timeout_timestamp()
808 cycles = card->split_timeout.cycles; in compute_split_timeout_timestamp()
825 request_tcode = async_header_get_tcode(p->header); in allocate_request()
828 data = &p->header[3]; in allocate_request()
834 data = p->payload; in allocate_request()
835 length = async_header_get_data_length(p->header); in allocate_request()
845 length = async_header_get_data_length(p->header); in allocate_request()
849 fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n", in allocate_request()
850 p->header[0], p->header[1], p->header[2]); in allocate_request()
857 kref_init(&request->kref); in allocate_request()
861 scoped_guard(spinlock_irqsave, &card->split_timeout.lock) in allocate_request()
862 request->response.timestamp = compute_split_timeout_timestamp(card, p->timestamp); in allocate_request()
864 request->response.speed = p->speed; in allocate_request()
865 request->response.generation = p->generation; in allocate_request()
866 request->response.ack = 0; in allocate_request()
867 request->response.callback = free_response_callback; in allocate_request()
868 request->ack = p->ack; in allocate_request()
869 request->timestamp = p->timestamp; in allocate_request()
870 request->length = length; in allocate_request()
872 memcpy(request->data, data, length); in allocate_request()
874 memcpy(request->request_header, p->header, sizeof(p->header)); in allocate_request()
880 * fw_send_response() - send response packet for asynchronous transaction.
895 if (request->ack != ACK_PENDING || in fw_send_response()
896 HEADER_DESTINATION_IS_BROADCAST(request->request_header)) { in fw_send_response()
902 data = request->data; in fw_send_response()
906 fw_fill_response(&request->response, request->request_header, rcode, data, data_length); in fw_send_response()
908 // Increase the reference count so that the object is kept alive while the response is in flight. in fw_send_response()
911 trace_async_response_outbound_initiate((uintptr_t)request, card->index, in fw_send_response()
912 request->response.generation, request->response.speed, in fw_send_response()
913 request->response.header, data, in fw_send_response()
916 card->driver->send_response(card, &request->response); in fw_send_response()
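
Inside an address_callback, a read is answered by filling the buffer that firewire-core passed in and then calling fw_send_response(); the core derives the response payload length from the request header (see fw_get_response_length() above). A minimal read-only handler sketch, with a hypothetical my_regs backing store and the offset check omitted for brevity:

#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/string.h>

static u32 my_regs[4];	// hypothetical read-only register image

static void my_read_only_callback(struct fw_card *card, struct fw_request *request,
				  int tcode, int destination, int source, int generation,
				  unsigned long long offset, void *data, size_t length,
				  void *callback_data)
{
	if (tcode != TCODE_READ_QUADLET_REQUEST && tcode != TCODE_READ_BLOCK_REQUEST) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}
	if (length > sizeof(my_regs)) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	memcpy(data, my_regs, length);	// becomes the response payload
	fw_send_response(card, request, RCODE_COMPLETE);
}
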
921 * fw_get_request_speed() - returns speed at which the @request was received
926 return request->response.speed; in fw_get_request_speed()
942 return request->timestamp; in fw_request_get_timestamp()
954 destination = async_header_get_destination(p->header); in handle_exclusive_region_request()
955 source = async_header_get_source(p->header); in handle_exclusive_region_request()
956 tcode = async_header_get_tcode(p->header); in handle_exclusive_region_request()
958 tcode = 0x10 + async_header_get_extended_tcode(p->header); in handle_exclusive_region_request()
962 request->length); in handle_exclusive_region_request()
972 // Invoke the callback outside the RCU read-side critical section, without the spinlock, holding a reference count. in handle_exclusive_region_request()
973 handler->address_callback(card, request, tcode, destination, source, p->generation, offset, in handle_exclusive_region_request()
974 request->data, request->length, handler->callback_data); in handle_exclusive_region_request()
992 request->length > 0x200) { in handle_fcp_region_request()
998 tcode = async_header_get_tcode(p->header); in handle_fcp_region_request()
999 destination = async_header_get_destination(p->header); in handle_fcp_region_request()
1000 source = async_header_get_source(p->header); in handle_fcp_region_request()
1014 if (is_enclosing_handler(handler, offset, request->length)) { in handle_fcp_region_request()
1049 handler->address_callback(card, request, tcode, destination, source, in handle_fcp_region_request()
1050 p->generation, offset, request->data, in handle_fcp_region_request()
1051 request->length, handler->callback_data); in handle_fcp_region_request()
1067 if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE) in fw_core_handle_request()
1070 tcode = async_header_get_tcode(p->header); in fw_core_handle_request()
1072 trace_async_phy_inbound((uintptr_t)p, card->index, p->generation, p->ack, p->timestamp, in fw_core_handle_request()
1073 p->header[1], p->header[2]); in fw_core_handle_request()
1084 trace_async_request_inbound((uintptr_t)request, card->index, p->generation, p->speed, in fw_core_handle_request()
1085 p->ack, p->timestamp, p->header, request->data, in fw_core_handle_request()
1086 tcode_is_read_request(tcode) ? 0 : request->length / 4); in fw_core_handle_request()
1088 offset = async_header_get_offset(p->header); in fw_core_handle_request()
1090 if (!is_in_fcp_region(offset, request->length)) in fw_core_handle_request()
1105 tcode = async_header_get_tcode(p->header); in fw_core_handle_response()
1106 tlabel = async_header_get_tlabel(p->header); in fw_core_handle_response()
1107 source = async_header_get_source(p->header); in fw_core_handle_response()
1108 rcode = async_header_get_rcode(p->header); in fw_core_handle_response()
1117 data = (u32 *) &p->header[3]; in fw_core_handle_response()
1128 data = p->payload; in fw_core_handle_response()
1129 data_length = async_header_get_data_length(p->header); in fw_core_handle_response()
1141 scoped_guard(spinlock_irqsave, &card->transactions.lock) { in fw_core_handle_response()
1142 list_for_each_entry(iter, &card->transactions.list, link) { in fw_core_handle_response()
1143 if (iter->node_id == source && iter->tlabel == tlabel) { in fw_core_handle_response()
1145 list_del_init(&iter->link); in fw_core_handle_response()
1146 card->transactions.tlabel_mask &= ~(1ULL << iter->tlabel); in fw_core_handle_response()
1154 trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack, in fw_core_handle_response()
1155 p->timestamp, p->header, data, data_length / 4); in fw_core_handle_response()
1167 card->driver->cancel_packet(card, &t->packet); in fw_core_handle_response()
1169 if (!t->with_tstamp) { in fw_core_handle_response()
1170 t->callback.without_tstamp(card, rcode, data, data_length, t->callback_data); in fw_core_handle_response()
1172 t->callback.with_tstamp(card, rcode, t->packet.timestamp, p->timestamp, data, in fw_core_handle_response()
1173 data_length, t->callback_data); in fw_core_handle_response()
1179 * fw_rcode_string() - convert a firewire result code to an error description
1225 start = (offset - topology_map_region.start) / 4; in handle_topology_map()
1229 scoped_guard(spinlock_irqsave, &card->topology_map.lock) in handle_topology_map()
1230 memcpy(payload, &card->topology_map.buffer[start], length); in handle_topology_map()
1245 __must_hold(&card->split_timeout.lock) in update_split_timeout()
1249 cycles = card->split_timeout.hi * 8000 + (card->split_timeout.lo >> 19); in update_split_timeout()
1254 card->split_timeout.cycles = cycles; in update_split_timeout()
1255 card->split_timeout.jiffies = isoc_cycles_to_jiffies(cycles); in update_split_timeout()
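
The conversion above combines the three low bits of SPLIT_TIMEOUT_HI (whole seconds, masked with 7 in handle_registers() below) with the top 13 bits of SPLIT_TIMEOUT_LO, each unit of which corresponds to one 125 microsecond isochronous cycle. A worked example with illustrative register values (not kernel defaults):

// SPLIT_TIMEOUT_HI = 0x00000001  ->  hi = 1 second
// SPLIT_TIMEOUT_LO = 0x19000000  ->  lo >> 19 = 800 cycles (800 * 125 us = 0.1 s)
// cycles = 1 * 8000 + 800 = 8800 isochronous cycles, i.e. 1.1 s at 8000 cycles/s,
// which update_split_timeout() then clamps and converts to jiffies.
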
1269 if (!card->priority_budget_implemented) { in handle_registers()
1277 * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8 in handle_registers()
1278 * and 9.6, but interoperable with IEEE 1394.1-2004 bridges in handle_registers()
1288 *data = cpu_to_be32(card->driver->read_csr(card, reg)); in handle_registers()
1290 card->driver->write_csr(card, reg, be32_to_cpu(*data)); in handle_registers()
1297 card->driver->write_csr(card, CSR_STATE_CLEAR, in handle_registers()
1305 *data = cpu_to_be32(card->split_timeout.hi); in handle_registers()
1310 scoped_guard(spinlock_irqsave, &card->split_timeout.lock) { in handle_registers()
1311 card->split_timeout.hi = be32_to_cpu(*data) & 7; in handle_registers()
1321 *data = cpu_to_be32(card->split_timeout.lo); in handle_registers()
1326 scoped_guard(spinlock_irqsave, &card->split_timeout.lock) { in handle_registers()
1327 card->split_timeout.lo = be32_to_cpu(*data) & 0xfff80000; in handle_registers()
1337 *data = card->maint_utility_register; in handle_registers()
1339 card->maint_utility_register = *data; in handle_registers()
1346 *data = cpu_to_be32(card->broadcast_channel); in handle_registers()
1348 card->broadcast_channel = in handle_registers()
1442 return -ENOMEM; in fw_core_init()