#ifndef _FIREWIRE_CORE_H
#define _FIREWIRE_CORE_H

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/atomic.h>

struct device;
struct fw_card;
struct fw_device;
struct fw_iso_buffer;
struct fw_iso_context;
struct fw_iso_packet;
struct fw_node;
struct fw_packet;


/* -card */

/* bitfields within the PHY registers */
#define PHY_LINK_ACTIVE		0x80
#define PHY_CONTENDER		0x40
#define PHY_BUS_RESET		0x40
#define PHY_EXTENDED_REGISTERS	0xe0
#define PHY_BUS_SHORT_RESET	0x40
#define PHY_INT_STATUS_BITS	0x3c
#define PHY_ENABLE_ACCEL	0x02
#define PHY_ENABLE_MULTI	0x01
#define PHY_PAGE_SELECT		0xe0

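/*
 * Illustrative sketch only, not part of this header: roughly how a caller
 * might combine update_phy_reg() (see struct fw_card_driver below) with the
 * bus reset bits above to trigger a bus reset.  The register numbers are an
 * assumption based on the IEEE 1394a PHY register map (IBR in register 1,
 * ISBR in extended register 5); check the PHY documentation and
 * fw_core_initiate_bus_reset() before relying on them.
 *
 *	static int initiate_reset(struct fw_card *card, bool short_reset)
 *	{
 *		int reg = short_reset ? 5 : 1;
 *		int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
 *
 *		// Clear no bits, set only the (self-clearing) reset bit.
 *		return card->driver->update_phy_reg(card, reg, 0, bit);
 *	}
 */
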
#define BANDWIDTH_AVAILABLE_INITIAL	4915
#define BROADCAST_CHANNEL_INITIAL	(1 << 31 | 31)
#define BROADCAST_CHANNEL_VALID		(1 << 30)

struct fw_card_driver {
	/*
	 * Enable the given card with the given initial config rom.
	 * This function is expected to activate the card and either
	 * enable the PHY or set the link_on bit and initiate a bus
	 * reset.
	 */
	int (*enable)(struct fw_card *card,
		      const __be32 *config_rom, size_t length);

	int (*update_phy_reg)(struct fw_card *card, int address,
			      int clear_bits, int set_bits);

	/*
	 * Update the config rom for an enabled card.  This function
	 * should change the config rom that is presented on the bus
	 * and initiate a bus reset.
	 */
	int (*set_config_rom)(struct fw_card *card,
			      const __be32 *config_rom, size_t length);

	void (*send_request)(struct fw_card *card, struct fw_packet *packet);
	void (*send_response)(struct fw_card *card, struct fw_packet *packet);
	/* Calling cancel is valid once a packet has been submitted. */
	int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);

	/*
	 * Allow the specified node ID to do direct DMA out of and into
	 * host memory.  The card will disable this for all nodes when
	 * a bus reset happens, so the driver needs to re-enable it
	 * after a bus reset.  Returns 0 on success, -ENODEV if the
	 * card doesn't support this, -ESTALE if the generation doesn't
	 * match.
	 */
	int (*enable_phys_dma)(struct fw_card *card,
			       int node_id, int generation);

	u32 (*get_cycle_time)(struct fw_card *card);

	struct fw_iso_context *
	(*allocate_iso_context)(struct fw_card *card,
				int type, int channel, size_t header_size);
	void (*free_iso_context)(struct fw_iso_context *ctx);

	int (*start_iso)(struct fw_iso_context *ctx,
			 s32 cycle, u32 sync, u32 tags);

	int (*queue_iso)(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload);

	int (*stop_iso)(struct fw_iso_context *ctx);
};

void fw_card_initialize(struct fw_card *card,
		const struct fw_card_driver *driver, struct device *device);
int fw_card_add(struct fw_card *card,
		u32 max_receive, u32 link_speed, u64 guid);
void fw_core_remove_card(struct fw_card *card);
int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
int fw_compute_block_crc(__be32 *block);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
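
/*
 * Illustrative sketch only, not part of this header: how a low-level card
 * driver (an OHCI-style backend, say) might wire up struct fw_card_driver
 * and hand a card to the core.  The example_* ops, struct example_hw and
 * its fields are hypothetical placeholders; the max_receive, link_speed
 * and guid values would normally come from the controller's registers.
 *
 *	static const struct fw_card_driver example_driver = {
 *		.enable			= example_enable,
 *		.update_phy_reg		= example_update_phy_reg,
 *		.set_config_rom		= example_set_config_rom,
 *		.send_request		= example_send_request,
 *		.send_response		= example_send_response,
 *		.cancel_packet		= example_cancel_packet,
 *		.enable_phys_dma	= example_enable_phys_dma,
 *		// iso context hooks omitted for brevity
 *	};
 *
 *	static int example_probe(struct example_hw *hw, struct device *dev)
 *	{
 *		fw_card_initialize(&hw->card, &example_driver, dev);
 *
 *		// fw_card_add() publishes the card; the core will call back
 *		// into example_driver.enable() with the initial config rom.
 *		return fw_card_add(&hw->card, hw->max_receive,
 *				   hw->link_speed, hw->guid);
 *	}
 */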

static inline struct fw_card *fw_card_get(struct fw_card *card)
{
	kref_get(&card->kref);

	return card;
}

void fw_card_release(struct kref *kref);

static inline void fw_card_put(struct fw_card *card)
{
	kref_put(&card->kref, fw_card_release);
}
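
/*
 * Illustrative sketch only: the usual pairing of fw_card_get() and
 * fw_card_put() when a reference to the card has to outlive the current
 * context, e.g. while it is stashed in some driver-private structure.
 * "ctx" is a hypothetical container, not something defined by the core.
 *
 *	ctx->card = fw_card_get(card);	// take a reference before stashing
 *	...
 *	fw_card_put(ctx->card);		// drop it when ctx is torn down
 */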


/* -cdev */

extern const struct file_operations fw_device_ops;

void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);


/* -device */

extern struct rw_semaphore fw_device_rwsem;
extern struct idr fw_device_idr;
extern int fw_cdev_major;

struct fw_device *fw_device_get_by_devt(dev_t devt);
int fw_device_set_broadcast_channel(struct device *dev, void *gen);
void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
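
/*
 * Illustrative sketch only: a plausible shape for fw_device_get_by_devt(),
 * given that fw_device_rwsem and fw_device_idr are exported above -- a
 * read-locked idr lookup keyed by the character device minor, taking a
 * reference before the lock is dropped.  fw_device_get() is assumed here
 * to be the usual get_device() wrapper on the embedded struct device;
 * treat this as a sketch, not as the authoritative implementation.
 *
 *	struct fw_device *device;
 *
 *	down_read(&fw_device_rwsem);
 *	device = idr_find(&fw_device_idr, MINOR(devt));
 *	if (device)
 *		fw_device_get(device);
 *	up_read(&fw_device_rwsem);
 *
 *	return device;
 */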


/* -iso */

int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
void fw_iso_resource_manage(struct fw_card *card, int generation,
			    u64 channels_mask, int *channel, int *bandwidth,
			    bool allocate, __be32 buffer[2]);
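
/*
 * Illustrative sketch only: one plausible way a caller might ask the IRM
 * for an isochronous channel plus bandwidth via fw_iso_resource_manage().
 * The in/out conventions assumed here (any channel accepted via an all-ones
 * mask, channel written back on success or left negative on failure,
 * bandwidth in IRM allocation units, buffer[] used as scratch for the lock
 * requests) should be checked against the implementation in core-iso.c.
 *
 *	__be32 scratch[2];
 *	int channel = -1;
 *	int bandwidth = 2400;	// hypothetical figure, in IRM units
 *
 *	fw_iso_resource_manage(card, generation, ~0ULL,
 *			       &channel, &bandwidth, true, scratch);
 *	if (channel < 0)
 *		return -EBUSY;	// nothing could be allocated
 */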


/* -topology */

enum {
	FW_NODE_CREATED,
	FW_NODE_UPDATED,
	FW_NODE_DESTROYED,
	FW_NODE_LINK_ON,
	FW_NODE_LINK_OFF,
	FW_NODE_INITIATED_RESET,
};

struct fw_node {
	u16 node_id;
	u8 color;
	u8 port_count;
	u8 link_on:1;
	u8 initiated_reset:1;
	u8 b_path:1;
	u8 phy_speed:2;	/* As in the self ID packet. */
	u8 max_speed:2;	/* Minimum of all phy speeds on the path from the
			 * local node to this node. */
	u8 max_depth:4;	/* Maximum depth to any leaf node. */
	u8 max_hops:4;	/* Maximum hops in this subtree. */
	atomic_t ref_count;

	/* For serializing node topology into a list. */
	struct list_head link;

	/* Upper layer specific data. */
	void *data;

	struct fw_node *ports[0];
};

static inline struct fw_node *fw_node_get(struct fw_node *node)
{
	atomic_inc(&node->ref_count);

	return node;
}

static inline void fw_node_put(struct fw_node *node)
{
	if (atomic_dec_and_test(&node->ref_count))
		kfree(node);
}
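
/*
 * Illustrative sketch only: struct fw_node is a variably sized object whose
 * ports[] array holds the peer reachable through each port (NULL where
 * nothing is connected, and possibly the port leading back towards the
 * parent), so a recursive walk should remember where it came from; the core
 * itself uses the color field for similar marking.  example_count_nodes()
 * is a hypothetical helper, not part of the core.
 *
 *	static int example_count_nodes(struct fw_node *node,
 *				       struct fw_node *parent)
 *	{
 *		int i, count = 1;
 *
 *		for (i = 0; i < node->port_count; i++)
 *			if (node->ports[i] != NULL && node->ports[i] != parent)
 *				count += example_count_nodes(node->ports[i],
 *							     node);
 *
 *		return count;
 *	}
 *
 *	// called as example_count_nodes(root, NULL)
 */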

void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
			      int generation, int self_id_count, u32 *self_ids);
void fw_destroy_nodes(struct fw_card *card);

/*
 * Check whether new_generation is the immediate successor of old_generation.
 * Take counter roll-over at 255 (as per OHCI) into account.
 */
static inline bool is_next_generation(int new_generation, int old_generation)
{
	return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
}
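
/*
 * Illustrative sketch only: is_next_generation() is typically used to detect
 * that more than one bus reset happened since some state was recorded, in
 * which case anything tied to the old generation (cached node IDs, allocated
 * iso resources, ...) has to be treated as stale.
 *
 *	if (!is_next_generation(new_generation, old_generation)) {
 *		// At least one intervening bus reset was missed;
 *		// re-validate or re-allocate generation-bound state.
 *	}
 */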


/* -transaction */

#define TCODE_IS_READ_REQUEST(tcode)	(((tcode) & ~1) == 4)
#define TCODE_IS_BLOCK_PACKET(tcode)	(((tcode) &  1) != 0)
#define TCODE_IS_REQUEST(tcode)		(((tcode) &  2) == 0)
#define TCODE_IS_RESPONSE(tcode)	(((tcode) &  2) != 0)
#define TCODE_HAS_REQUEST_DATA(tcode)	(((tcode) & 12) != 4)
#define TCODE_HAS_RESPONSE_DATA(tcode)	(((tcode) & 12) != 0)
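
/*
 * Illustrative sketch only: how the macros above are typically applied to
 * the tcode of an incoming packet.  The TCODE_* values are the IEEE 1394
 * transaction codes as in <linux/firewire-constants.h>, e.g. read quadlet
 * request = 4, read block request = 5, write response = 2.
 *
 *	if (TCODE_IS_REQUEST(tcode) && TCODE_HAS_REQUEST_DATA(tcode)) {
 *		// writes and lock requests carry a payload ...
 *	} else if (TCODE_IS_READ_REQUEST(tcode)) {
 *		// ... while read requests (tcode 4 and 5) do not
 *	}
 */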

#define LOCAL_BUS 0xffc0

void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
		      int rcode, void *payload, size_t length);
void fw_send_phy_config(struct fw_card *card,
			int node_id, int generation, int gap_count);

#endif /* _FIREWIRE_CORE_H */