xref: /freebsd/sys/dev/aic7xxx/aic7xxx_inline.h (revision 77a0943ded95b9e6438f7db70c4a28e4d93946d4)
1 /*
2  * Inline routines shareable across OS platforms.
3  *
4  * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * Alternatively, this software may be distributed under the terms of the
17  * GNU Public License ("GPL").
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  * $Id: //depot/src/aic7xxx/aic7xxx_inline.h#10 $
32  *
33  * $FreeBSD$
34  */
35 
36 #ifndef _AIC7XXX_INLINE_H_
37 #define _AIC7XXX_INLINE_H_
38 
39 /************************* Sequencer Execution Control ************************/
40 static __inline int  sequencer_paused(struct ahc_softc *ahc);
41 static __inline void ahc_pause_bug_fix(struct ahc_softc *ahc);
42 static __inline void pause_sequencer(struct ahc_softc *ahc);
43 static __inline void unpause_sequencer(struct ahc_softc *ahc);
44 
45 /*
46  * Work around any chip bugs related to halting sequencer execution.
47  * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
48  * reading a register that will set this signal and deassert it.
49  * Without this workaround, if the chip is paused, by an interrupt or
50  * manual pause while accessing scb ram, accesses to certain registers
51  * will hang the system (infinite pci retries).
52  */
53 static __inline void
54 ahc_pause_bug_fix(struct ahc_softc *ahc)
55 {
56 	if ((ahc->features & AHC_ULTRA2) != 0)
57 		(void)ahc_inb(ahc, CCSCBCTL);
58 }
59 
60 /*
61  * Determine whether the sequencer has halted code execution.
62  * Returns non-zero status if the sequencer is stopped.
63  */
64 static __inline int
65 sequencer_paused(struct ahc_softc *ahc)
66 {
67 	return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
68 }
69 
70 /*
71  * Request that the sequencer stop and wait, indefinitely, for it
72  * to stop.  The sequencer will only acknowledge that it is paused
73  * once it has reached an instruction boundary and PAUSEDIS is
74  * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
75  * for critical sections.
76  */
77 static __inline void
78 pause_sequencer(struct ahc_softc *ahc)
79 {
80 	ahc_outb(ahc, HCNTRL, ahc->pause);
81 
82 	/*
83 	 * Since the sequencer can disable pausing in a critical section, we
84 	 * must loop until it actually stops.
85 	 */
86 	while (sequencer_paused(ahc) == 0)
87 		;
88 
89 	ahc_pause_bug_fix(ahc);
90 }
91 
92 /*
93  * Allow the sequencer to continue program execution.
94  * We check here to ensure that no additional interrupt
95  * sources that would cause the sequencer to halt have been
96  * asserted.  If, for example, a SCSI bus reset is detected
97  * while we are fielding a different, pausing, interrupt type,
98  * we don't want to release the sequencer before going back
99  * into our interrupt handler and dealing with this new
100  * condition.
101  */
102 static __inline void
103 unpause_sequencer(struct ahc_softc *ahc)
104 {
105 	if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
106 		ahc_outb(ahc, HCNTRL, ahc->unpause);
107 }
108 
109 /*********************** Untagged Transaction Routines ************************/
110 u_int			ahc_index_busy_tcl(struct ahc_softc *ahc,
111 					   u_int tcl, int unbusy);
112 static __inline void	ahc_freeze_untagged_queues(struct ahc_softc *ahc);
113 static __inline void	ahc_release_untagged_queues(struct ahc_softc *ahc);
114 
115 /*
116  * Block our completion routine from starting the next untagged
117  * transaction for this target or target lun.
118  */
119 static __inline void
120 ahc_freeze_untagged_queues(struct ahc_softc *ahc)
121 {
122 	if ((ahc->features & AHC_SCB_BTT) == 0)
123 		ahc->untagged_queue_lock++;
124 }
125 
126 /*
127  * Allow the next untagged transaction for this target or target lun
128  * to be executed.  We use a counting semaphore to allow the lock
129  * to be acquired recursively.  Once the count drops to zero, the
130  * transaction queues will be run.
131  */
132 static __inline void
133 ahc_release_untagged_queues(struct ahc_softc *ahc)
134 {
135 	if ((ahc->features & AHC_SCB_BTT) == 0) {
136 		ahc->untagged_queue_lock--;
137 		if (ahc->untagged_queue_lock == 0)
138 			ahc_run_untagged_queues(ahc);
139 	}
140 }
141 
142 /************************** Memory mapping routines ***************************/
143 static __inline struct ahc_dma_seg *
144 			ahc_sg_bus_to_virt(struct scb *scb,
145 					   uint32_t sg_busaddr);
146 static __inline uint32_t
147 			ahc_sg_virt_to_bus(struct scb *scb,
148 					   struct ahc_dma_seg *sg);
149 static __inline uint32_t
150 			ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index);
151 
152 static __inline struct ahc_dma_seg *
153 ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
154 {
155 	int sg_index;
156 
157 	sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
158 	/* sg_list_phys points to entry 1, not 0 */
159 	sg_index++;
160 
161 	return (&scb->sg_list[sg_index]);
162 }
163 
164 static __inline uint32_t
165 ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
166 {
167 	int sg_index;
168 
169 	/* sg_list_phys points to entry 1, not 0 */
170 	sg_index = sg - &scb->sg_list[1];
171 
172 	return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
173 }
174 
175 static __inline uint32_t
176 ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
177 {
178 	return (ahc->scb_data->hscb_busaddr
179 		+ (sizeof(struct hardware_scb) * index));
180 }
181 
182 /******************************** Debugging ***********************************/
183 static __inline char *ahc_name(struct ahc_softc *ahc);
184 
185 static __inline char *
186 ahc_name(struct ahc_softc *ahc)
187 {
188 	return (ahc->name);
189 }
190 
/*********************** Miscellaneous Support Functions **********************/
192 
193 static __inline int	ahc_check_residual(struct scb *scb);
194 static __inline struct ahc_initiator_tinfo *
195 			ahc_fetch_transinfo(struct ahc_softc *ahc,
196 					    char channel, u_int our_id,
197 					    u_int remote_id,
198 					    struct tmode_tstate **tstate);
199 static __inline struct scb*
200 			ahc_get_scb(struct ahc_softc *ahc);
201 static __inline void	ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
202 static __inline void	ahc_swap_with_next_hscb(struct ahc_softc *ahc,
203 						struct scb *scb);
204 static __inline void	ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
205 
206 /*
207  * Determine whether the sequencer reported a residual
208  * for this SCB/transaction.
209  */
210 static __inline int
211 ahc_check_residual(struct scb *scb)
212 {
213 	struct status_pkt *sp;
214 
215 	sp = &scb->hscb->shared_data.status;
216 	if ((scb->hscb->sgptr & SG_RESID_VALID) != 0)
217 		return (1);
218 	return (0);
219 }
220 
221 /*
222  * Return pointers to the transfer negotiation information
223  * for the specified our_id/remote_id pair.
224  */
225 static __inline struct ahc_initiator_tinfo *
226 ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
227 		    u_int remote_id, struct tmode_tstate **tstate)
228 {
229 	/*
230 	 * Transfer data structures are stored from the perspective
231 	 * of the target role.  Since the parameters for a connection
232 	 * in the initiator role to a given target are the same as
233 	 * when the roles are reversed, we pretend we are the target.
234 	 */
235 	if (channel == 'B')
236 		our_id += 8;
237 	*tstate = ahc->enabled_targets[our_id];
238 	return (&(*tstate)->transinfo[remote_id]);
239 }
240 
241 /*
242  * Get a free scb. If there are none, see if we can allocate a new SCB.
243  */
244 static __inline struct scb *
245 ahc_get_scb(struct ahc_softc *ahc)
246 {
247 	struct scb *scb;
248 
249 	if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
250 		ahc_alloc_scbs(ahc);
251 		scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
252 		if (scb == NULL)
253 			return (NULL);
254 	}
255 	SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
256 	return (scb);
257 }
258 
259 /*
260  * Return an SCB resource to the free list.
261  */
262 static __inline void
263 ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
264 {
265 	struct hardware_scb *hscb;
266 
267 	hscb = scb->hscb;
268 	/* Clean up for the next user */
269 	ahc->scb_data->scbindex[hscb->tag] = NULL;
270 	scb->flags = SCB_FREE;
271 	hscb->control = 0;
272 
273 	SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
274 
275 	/* Notify the OSM that a resource is now available. */
276 	ahc_platform_scb_free(ahc, scb);
277 }
278 
279 static __inline struct scb *
280 ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
281 {
282 	return (ahc->scb_data->scbindex[tag]);
283 
284 }
285 
static __inline void
ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *q_hscb;
	u_int  saved_tag;

	/*
	 * Our queuing method is a bit tricky.  The card
	 * knows in advance which HSCB to download, and we
	 * can't disappoint it.  To achieve this, the next
	 * SCB to download is saved off in ahc->next_queued_scb.
	 * When we are called to queue "an arbitrary scb",
	 * we copy the contents of the incoming HSCB to the one
	 * the sequencer knows about, swap HSCB pointers and
	 * finally assign the SCB to the tag indexed location
	 * in the scb_array.  This makes sure that we can still
	 * locate the correct SCB by SCB_TAG.
	 *
	 * NOTE: the statement ordering below is load-bearing —
	 * the tag must be saved before the memcpy overwrites it
	 * and restored after the cdb_ptr fixup.
	 */
	q_hscb = ahc->next_queued_scb->hscb;
	/* Remember the tag owned by the queued slot; memcpy clobbers it. */
	saved_tag = q_hscb->tag;
	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
	if ((scb->flags & SCB_CDB32_PTR) != 0) {
		/*
		 * Re-derive the bus address of the embedded 32-byte CDB
		 * area for the copied HSCB.  At this point q_hscb->tag
		 * still holds the incoming scb's tag (just memcpy'd).
		 */
		q_hscb->shared_data.cdb_ptr =
		    ahc_hscb_busaddr(ahc, q_hscb->tag)
		  + offsetof(struct hardware_scb, cdb32);
	}
	/* Restore the slot's own tag and chain to the incoming scb's tag. */
	q_hscb->tag = saved_tag;
	q_hscb->next = scb->hscb->tag;

	/* Now swap HSCB pointers. */
	ahc->next_queued_scb->hscb = scb->hscb;
	scb->hscb = q_hscb;

	/* Now define the mapping from tag to SCB in the scbindex */
	ahc->scb_data->scbindex[scb->hscb->tag] = scb;
}
322 
323 /*
324  * Tell the sequencer about a new transaction to execute.
325  */
/*
 * Tell the sequencer about a new transaction to execute.
 */
static __inline void
ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
{
	/* Hand the HSCB the sequencer expects next to this transaction. */
	ahc_swap_with_next_hscb(ahc, scb);

	/* Sanity check: neither tag may be the list terminator. */
	if (scb->hscb->tag == SCB_LIST_NULL
	 || scb->hscb->next == SCB_LIST_NULL)
		panic("Attempt to queue invalid SCB tag %x:%x\n",
		      scb->hscb->tag, scb->hscb->next);

	/*
	 * Keep a history of SCBs we've downloaded in the qinfifo.
	 * The qinfifo entry must be visible before the queue-position
	 * register is advanced below.
	 */
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		/* Chips with queue registers can be poked directly. */
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		/*
		 * Older chips require the sequencer to be paused while
		 * KERNEL_QINPOS is updated, unless the chip auto-pauses
		 * on access to that register (AHC_AUTOPAUSE).
		 */
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			pause_sequencer(ahc);
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			unpause_sequencer(ahc);
	}
}
350 
351 /************************** Interrupt Processing ******************************/
352 static __inline void ahc_intr(struct ahc_softc *ahc);
353 
354 /*
355  * Catch an interrupt from the adapter
356  */
/*
 * Catch an interrupt from the adapter.  Dispatches, in order:
 * command completions, fatal break-address interrupts (which
 * abort further processing), then sequencer and SCSI interrupts.
 */
static __inline void
ahc_intr(struct ahc_softc *ahc)
{
	u_int	intstat;

	intstat = ahc_inb(ahc, INTSTAT);

	/*
	 * Any interrupts to process?
	 */
#if AHC_PCI_CONFIG > 0
	if ((intstat & INT_PEND) == 0) {
		/*
		 * Not our interrupt.  On PCI chips, a burst of spurious
		 * interrupts may indicate a latched PCI error; check the
		 * ERROR register once the count exceeds the threshold.
		 */
		if ((ahc->chip & AHC_PCI) != 0
		 && (ahc->unsolicited_ints > 500)) {
			if ((ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
				ahc_pci_intr(ahc);
			ahc->unsolicited_ints = 0;
		} else {
			ahc->unsolicited_ints++;
		}
		return;
	} else {
		ahc->unsolicited_ints = 0;
	}
#else
	if ((intstat & INT_PEND) == 0)
		return;
#endif

	if (intstat & CMDCMPLT) {
		ahc_outb(ahc, CLRINT, CLRCMDINT);
		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		ahc_flush_device_writes(ahc);
		ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			ahc_run_tqinfifo(ahc, /*paused*/FALSE);
#endif
	}
	if (intstat & BRKADRINT) {
		ahc_handle_brkadrint(ahc);
		/* Fatal error, no more interrupts to handle. */
		return;
	}

	/* SEQINT/SCSIINT imply the sequencer is paused; apply workaround. */
	if ((intstat & (SEQINT|SCSIINT)) != 0)
		ahc_pause_bug_fix(ahc);

	if ((intstat & SEQINT) != 0)
		ahc_handle_seqint(ahc, intstat);

	if ((intstat & SCSIINT) != 0)
		ahc_handle_scsiint(ahc, intstat);
}
418 
419 #endif  /* _AIC7XXX_INLINE_H_ */
420