xref: /linux/arch/alpha/include/asm/io.h (revision 3fd6c59042dbba50391e30862beac979491145fe)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __ALPHA_IO_H
3 #define __ALPHA_IO_H
4 
5 #ifdef __KERNEL__
6 
7 #include <linux/kernel.h>
8 #include <linux/mm.h>
9 #include <asm/compiler.h>
10 #include <asm/machvec.h>
11 #include <asm/hwrpb.h>
12 
13 /* The generic header contains only prototypes.  Including it ensures that
14    the implementation we have here matches that interface.  */
15 #include <asm-generic/iomap.h>
16 
17 /*
18  * Virtual -> physical identity mapping starts at this offset
19  */
20 #ifdef USE_48_BIT_KSEG
21 #define IDENT_ADDR     0xffff800000000000UL
22 #else
23 #define IDENT_ADDR     0xfffffc0000000000UL
24 #endif
25 
26 /*
27  * We try to avoid hae updates (thus the cache), but when we
28  * do need to update the hae, we need to do it atomically, so
29  * that any interrupts wouldn't get confused with the hae
30  * register not being up-to-date with respect to the hardware
31  * value.
32  */
/*
 * Write NEW_HAE to the HAE register and the software cache as one
 * uninterruptible unit: the IPL is raised to maximum first, so no
 * interrupt handler can observe alpha_mv.hae_cache disagreeing with
 * the hardware register.
 */
extern inline void __set_hae(unsigned long new_hae)
{
	unsigned long flags = swpipl(IPL_MAX);	/* block all interrupts */

	barrier();	/* compiler fence: nothing moves above the IPL raise */

	alpha_mv.hae_cache = new_hae;
	*alpha_mv.hae_register = new_hae;
	mb();
	/* Re-read to make sure it was written.  */
	new_hae = *alpha_mv.hae_register;

	setipl(flags);	/* restore the caller's interrupt level */
	barrier();	/* compiler fence: nothing moves below the restore */
}
48 
set_hae(unsigned long new_hae)49 extern inline void set_hae(unsigned long new_hae)
50 {
51 	if (new_hae != alpha_mv.hae_cache)
52 		__set_hae(new_hae);
53 }
54 
55 /*
56  * Change virtual addresses to physical addresses and vv.
57  */
58 #ifdef USE_48_BIT_KSEG
virt_to_phys(volatile void * address)59 static inline unsigned long virt_to_phys(volatile void *address)
60 {
61 	return (unsigned long)address - IDENT_ADDR;
62 }
63 
phys_to_virt(unsigned long address)64 static inline void * phys_to_virt(unsigned long address)
65 {
66 	return (void *) (address + IDENT_ADDR);
67 }
68 #else
virt_to_phys(volatile void * address)69 static inline unsigned long virt_to_phys(volatile void *address)
70 {
71         unsigned long phys = (unsigned long)address;
72 
73 	/* Sign-extend from bit 41.  */
74 	phys <<= (64 - 41);
75 	phys = (long)phys >> (64 - 41);
76 
77 	/* Crop to the physical address width of the processor.  */
78         phys &= (1ul << hwrpb->pa_bits) - 1;
79 
80         return phys;
81 }
82 
phys_to_virt(unsigned long address)83 static inline void * phys_to_virt(unsigned long address)
84 {
85         return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
86 }
87 #endif
88 
89 #define virt_to_phys		virt_to_phys
90 #define phys_to_virt		phys_to_virt
91 
92 /* Maximum PIO space address supported?  */
93 #define IO_SPACE_LIMIT 0xffff
94 
95 /*
96  * Change addresses as seen by the kernel (virtual) to addresses as
97  * seen by a device (bus), and vice versa.
98  *
99  * Note that this only works for a limited range of kernel addresses,
100  * and very well may not span all memory.  Consider this interface
101  * deprecated in favour of the DMA-mapping API.
102  */
103 extern unsigned long __direct_map_base;
104 extern unsigned long __direct_map_size;
105 
isa_virt_to_bus(volatile void * address)106 static inline unsigned long __deprecated isa_virt_to_bus(volatile void *address)
107 {
108 	unsigned long phys = virt_to_phys(address);
109 	unsigned long bus = phys + __direct_map_base;
110 	return phys <= __direct_map_size ? bus : 0;
111 }
112 #define isa_virt_to_bus isa_virt_to_bus
113 
isa_bus_to_virt(unsigned long address)114 static inline void * __deprecated isa_bus_to_virt(unsigned long address)
115 {
116 	void *virt;
117 
118 	/* This check is a sanity check but also ensures that bus address 0
119 	   maps to virtual address 0 which is useful to detect null pointers
120 	   (the NCR driver is much simpler if NULL pointers are preserved).  */
121 	address -= __direct_map_base;
122 	virt = phys_to_virt(address);
123 	return (long)address <= 0 ? NULL : virt;
124 }
125 #define isa_bus_to_virt isa_bus_to_virt
126 
127 /*
128  * There are different chipsets to interface the Alpha CPUs to the world.
129  */
130 
131 #define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
132 #define _IO_CONCAT(a,b)	a ## _ ## b
133 
134 #ifdef CONFIG_ALPHA_GENERIC
135 
136 /* In a generic kernel, we always go through the machine vector.  */
137 
/* Emit a static inline generic_<NAME>() that forwards a one-argument
   read accessor through the machine vector.  QUAL supplies the
   const/volatile qualifiers of the __iomem argument.  */
#define REMAP1(TYPE, NAME, QUAL)					\
static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
{									\
	return alpha_mv.mv_##NAME(addr);				\
}

/* As REMAP1, but for two-argument write accessors (value, address).  */
#define REMAP2(TYPE, NAME, QUAL)					\
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
{									\
	alpha_mv.mv_##NAME(b, addr);					\
}
149 
/* Instantiate the machine-vector trampolines for every MMIO accessor.  */
REMAP1(unsigned int, ioread8, const)
REMAP1(unsigned int, ioread16, const)
REMAP1(unsigned int, ioread32, const)
REMAP1(u64, ioread64, const)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u64, iowrite64, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)

#undef REMAP1
#undef REMAP2
170 
/* Map a port number to an __iomem cookie via the machine vector.  */
extern inline void __iomem *generic_ioportmap(unsigned long a)
{
	return alpha_mv.mv_ioportmap(a);
}
175 
/* Map a bus address range of S bytes for MMIO via the machine vector.  */
static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
	return alpha_mv.mv_ioremap(a, s);
}
180 
/* Undo a generic_ioremap() via the machine vector.  */
static inline void generic_iounmap(volatile void __iomem *a)
{
	/* Plain call, no "return": returning an expression from a void
	   function is an ISO C constraint violation (only accepted as a
	   GNU extension).  */
	alpha_mv.mv_iounmap(a);
}
185 
/* Nonzero if A lies within this chipset's I/O address range.  */
static inline int generic_is_ioaddr(unsigned long a)
{
	return alpha_mv.mv_is_ioaddr(a);
}

/* Nonzero if the cookie refers to MMIO (rather than port I/O) space.  */
static inline int generic_is_mmio(const volatile void __iomem *a)
{
	return alpha_mv.mv_is_mmio(a);
}
195 
196 #define __IO_PREFIX		generic
197 #define generic_trivial_rw_bw	0
198 #define generic_trivial_rw_lq	0
199 #define generic_trivial_io_bw	0
200 #define generic_trivial_io_lq	0
201 #define generic_trivial_iounmap	0
202 
203 #else
204 
205 #if defined(CONFIG_ALPHA_CIA)
206 # include <asm/core_cia.h>
207 #elif defined(CONFIG_ALPHA_IRONGATE)
208 # include <asm/core_irongate.h>
209 #elif defined(CONFIG_ALPHA_MARVEL)
210 # include <asm/core_marvel.h>
211 #elif defined(CONFIG_ALPHA_MCPCIA)
212 # include <asm/core_mcpcia.h>
213 #elif defined(CONFIG_ALPHA_POLARIS)
214 # include <asm/core_polaris.h>
215 #elif defined(CONFIG_ALPHA_T2)
216 # include <asm/core_t2.h>
217 #elif defined(CONFIG_ALPHA_TSUNAMI)
218 # include <asm/core_tsunami.h>
219 #elif defined(CONFIG_ALPHA_TITAN)
220 # include <asm/core_titan.h>
221 #elif defined(CONFIG_ALPHA_WILDFIRE)
222 # include <asm/core_wildfire.h>
223 #else
224 #error "What system is this?"
225 #endif
226 
227 #endif /* GENERIC */
228 
229 /*
230  * We always have external versions of these routines.
231  */
232 extern u8		inb(unsigned long port);
233 extern u16		inw(unsigned long port);
234 extern u32		inl(unsigned long port);
235 extern void		outb(u8 b, unsigned long port);
236 extern void		outw(u16 b, unsigned long port);
237 extern void		outl(u32 b, unsigned long port);
238 #define inb inb
239 #define inw inw
240 #define inl inl
241 #define outb outb
242 #define outw outw
243 #define outl outl
244 
245 extern u8		readb(const volatile void __iomem *addr);
246 extern u16		readw(const volatile void __iomem *addr);
247 extern u32		readl(const volatile void __iomem *addr);
248 extern u64		readq(const volatile void __iomem *addr);
249 extern void		writeb(u8 b, volatile void __iomem *addr);
250 extern void		writew(u16 b, volatile void __iomem *addr);
251 extern void		writel(u32 b, volatile void __iomem *addr);
252 extern void		writeq(u64 b, volatile void __iomem *addr);
253 #define readb readb
254 #define readw readw
255 #define readl readl
256 #define readq readq
257 #define writeb writeb
258 #define writew writew
259 #define writel writel
260 #define writeq writeq
261 
262 extern u8		__raw_readb(const volatile void __iomem *addr);
263 extern u16		__raw_readw(const volatile void __iomem *addr);
264 extern u32		__raw_readl(const volatile void __iomem *addr);
265 extern u64		__raw_readq(const volatile void __iomem *addr);
266 extern void		__raw_writeb(u8 b, volatile void __iomem *addr);
267 extern void		__raw_writew(u16 b, volatile void __iomem *addr);
268 extern void		__raw_writel(u32 b, volatile void __iomem *addr);
269 extern void		__raw_writeq(u64 b, volatile void __iomem *addr);
270 #define __raw_readb __raw_readb
271 #define __raw_readw __raw_readw
272 #define __raw_readl __raw_readl
273 #define __raw_readq __raw_readq
274 #define __raw_writeb __raw_writeb
275 #define __raw_writew __raw_writew
276 #define __raw_writel __raw_writel
277 #define __raw_writeq __raw_writeq
278 
279 /*
280  * Mapping from port numbers to __iomem space is pretty easy.
281  */
282 
283 /* These two have to be extern inline because of the extern prototype from
284    <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
285    the same declaration.  */
/* Translate a port number into an __iomem cookie.  SIZE is unused:
   Alpha port mappings do not depend on the access width.  */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

extern inline void ioport_unmap(void __iomem *addr)
{
	/* Nothing to undo -- ioport_map() allocates no resources.  */
}
294 
295 #define ioport_map ioport_map
296 #define ioport_unmap ioport_unmap
297 
/* Map a bus address range for MMIO through the chipset hook.  */
static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}

/* No separate write-combining mapping exists; reuse the plain one.  */
#define ioremap_wc ioremap

static inline void iounmap(volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}
309 
static inline int __is_ioaddr(unsigned long addr)
{
	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
/* Let callers pass any pointer-like value: the macro casts to
   unsigned long before invoking the function above.  */
#define __is_ioaddr(a)		__is_ioaddr((unsigned long)(a))

static inline int __is_mmio(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}
320 
321 
322 /*
323  * If the actual I/O bits are sufficiently trivial, then expand inline.
324  */
325 
326 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
/* 8/16-bit port accessors.  Reads are bracketed by mb() so the device
   access stays ordered with surrounding memory operations; writes issue
   a leading mb() only (completion is not awaited).  */
extern inline unsigned int ioread8(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
	mb();
	return ret;
}

extern inline unsigned int ioread16(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return ret;
}

extern inline void iowrite8(u8 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite8)(b, addr);
}

extern inline void iowrite16(u16 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite16)(b, addr);
}

/* The classic in/out port ops are the ioread/iowrite ops applied to a
   mapped port.  */
extern inline u8 inb(unsigned long port)
{
	return ioread8(ioport_map(port, 1));
}

extern inline u16 inw(unsigned long port)
{
	return ioread16(ioport_map(port, 2));
}

extern inline void outb(u8 b, unsigned long port)
{
	iowrite8(b, ioport_map(port, 1));
}

extern inline void outw(u16 b, unsigned long port)
{
	iowrite16(b, ioport_map(port, 2));
}
376 #endif
377 
378 #define ioread8 ioread8
379 #define ioread16 ioread16
380 #define iowrite8 iowrite8
381 #define iowrite16 iowrite16
382 
383 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
/* 32-bit port read, bracketed by mb() to keep it ordered with
   surrounding memory operations.  */
extern inline unsigned int ioread32(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return ret;
}
392 
ioread64(const void __iomem * addr)393 extern inline u64 ioread64(const void __iomem *addr)
394 {
395 	unsigned int ret;
396 	mb();
397 	ret = IO_CONCAT(__IO_PREFIX,ioread64)(addr);
398 	mb();
399 	return ret;
400 }
401 
/* 32/64-bit port writes: leading mb() only, completion not awaited.  */
extern inline void iowrite32(u32 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
}

extern inline void iowrite64(u64 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite64)(b, addr);
}

/* 32-bit in/out port ops built on ioread32/iowrite32.  */
extern inline u32 inl(unsigned long port)
{
	return ioread32(ioport_map(port, 4));
}

extern inline void outl(u32 b, unsigned long port)
{
	iowrite32(b, ioport_map(port, 4));
}
423 #endif
424 
425 #define ioread32 ioread32
426 #define ioread64 ioread64
427 #define iowrite32 iowrite32
428 #define iowrite64 iowrite64
429 
430 #if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
/* Raw 8/16-bit MMIO accessors: direct chipset access with no barriers.
   Callers are responsible for any ordering they require.  */
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}
450 
/* Ordered 8/16-bit MMIO accessors: reads are bracketed by mb();
   writes issue a leading mb() only.  */
extern inline u8 readb(const volatile void __iomem *addr)
{
	u8 ret;
	mb();
	ret = __raw_readb(addr);
	mb();
	return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
	u16 ret;
	mb();
	ret = __raw_readw(addr);
	mb();
	return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeb(b, addr);
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
	mb();
	__raw_writew(b, addr);
}
480 #endif
481 
482 #if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
/* Raw 32/64-bit MMIO accessors: direct chipset access, no barriers.
   Callers are responsible for any ordering they require.  */
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}
502 
/* Ordered 32/64-bit MMIO accessors: reads are bracketed by mb();
   writes issue a leading mb() only.  */
extern inline u32 readl(const volatile void __iomem *addr)
{
	u32 ret;
	mb();
	ret = __raw_readl(addr);
	mb();
	return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
	u64 ret;
	mb();
	ret = __raw_readq(addr);
	mb();
	return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
	mb();
	__raw_writel(b, addr);
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeq(b, addr);
}
532 #endif
533 
534 #define ioread16be(p) swab16(ioread16(p))
535 #define ioread32be(p) swab32(ioread32(p))
536 #define ioread64be(p) swab64(ioread64(p))
537 #define iowrite16be(v,p) iowrite16(swab16(v), (p))
538 #define iowrite32be(v,p) iowrite32(swab32(v), (p))
539 #define iowrite64be(v,p) iowrite64(swab64(v), (p))
540 
541 #define inb_p		inb
542 #define inw_p		inw
543 #define inl_p		inl
544 #define outb_p		outb
545 #define outw_p		outw
546 #define outl_p		outl
547 
548 extern u8 readb_relaxed(const volatile void __iomem *addr);
549 extern u16 readw_relaxed(const volatile void __iomem *addr);
550 extern u32 readl_relaxed(const volatile void __iomem *addr);
551 extern u64 readq_relaxed(const volatile void __iomem *addr);
552 #define readb_relaxed readb_relaxed
553 #define readw_relaxed readw_relaxed
554 #define readl_relaxed readl_relaxed
555 #define readq_relaxed readq_relaxed
556 
557 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
/* "Relaxed" 8/16-bit reads: the leading mb() is kept; only the
   trailing barrier of the fully-ordered readb/readw is omitted.  */
extern inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readb(addr);
}

extern inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readw(addr);
}
569 #endif
570 
571 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
/* "Relaxed" 32/64-bit reads: leading mb() kept, trailing barrier of
   the fully-ordered readl/readq omitted.  */
extern inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readl(addr);
}

extern inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readq(addr);
}
583 #endif
584 
585 #define writeb_relaxed	writeb
586 #define writew_relaxed	writew
587 #define writel_relaxed	writel
588 #define writeq_relaxed	writeq
589 
590 /*
591  * String version of IO memory access ops:
592  */
593 extern void memcpy_fromio(void *, const volatile void __iomem *, long);
594 extern void memcpy_toio(volatile void __iomem *, const void *, long);
595 extern void _memset_c_io(volatile void __iomem *, unsigned long, long);
596 
/* Fill LEN bytes of MMIO space with byte C.  */
static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
	/* Replicate the byte into all eight lanes of a quadword for
	   the wide fill helper.  */
	unsigned long pattern = 0x0101010101010101UL * c;

	_memset_c_io(addr, pattern, len);
}
601 
602 #define __HAVE_ARCH_MEMSETW_IO
/* Fill LEN bytes of MMIO space with the 16-bit value C.  */
static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
	/* Replicate the halfword into all four lanes of a quadword.  */
	unsigned long pattern = 0x0001000100010001UL * c;

	_memset_c_io(addr, pattern, len);
}
607 
608 #define memset_io memset_io
609 #define memcpy_fromio memcpy_fromio
610 #define memcpy_toio memcpy_toio
611 
612 /*
613  * String versions of in/out ops:
614  */
615 extern void insb (unsigned long port, void *dst, unsigned long count);
616 extern void insw (unsigned long port, void *dst, unsigned long count);
617 extern void insl (unsigned long port, void *dst, unsigned long count);
618 extern void outsb (unsigned long port, const void *src, unsigned long count);
619 extern void outsw (unsigned long port, const void *src, unsigned long count);
620 extern void outsl (unsigned long port, const void *src, unsigned long count);
621 
622 #define insb insb
623 #define insw insw
624 #define insl insl
625 #define outsb outsb
626 #define outsw outsw
627 #define outsl outsl
628 
629 #define RTC_PORT(x)	(0x70 + (x))
630 #define RTC_ALWAYS_BCD	0
631 
632 /*
633  * These get provided from <asm-generic/iomap.h> since alpha does not
634  * select GENERIC_IOMAP.
635  */
636 #define ioread64 ioread64
637 #define iowrite64 iowrite64
638 #define ioread8_rep ioread8_rep
639 #define ioread16_rep ioread16_rep
640 #define ioread32_rep ioread32_rep
641 #define iowrite8_rep iowrite8_rep
642 #define iowrite16_rep iowrite16_rep
643 #define iowrite32_rep iowrite32_rep
644 #define pci_iounmap pci_iounmap
645 
646 #include <asm-generic/io.h>
647 
648 #endif /* __KERNEL__ */
649 
650 #endif /* __ALPHA_IO_H */
651