/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_mem_ops.h"

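/* All dynamically mapped core-memory accesses in this file go through a
 * single 4MB BAR1 window, selected by the BAR1_INDEX_DYNAMIC_MAP index
 * register and remapped on the fly as the target address advances.
 */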
#define MEMOPS_IDX   BAR1_INDEX_DYNAMIC_MAP

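/* On big-endian hosts, the 64-bit bulk copy loops below depend on flipping
 * the swap-mode bit (bit 1) in the BAR1 index register around each run of
 * writeq()/readq() calls. On little-endian builds the toggle compiles away.
 */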
#ifdef __BIG_ENDIAN_BITFIELD
static inline void
octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
{
	u32 mask;

	mask = oct->fn_list.bar1_idx_read(oct, idx);
	mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
	oct->fn_list.bar1_idx_write(oct, idx, mask);
}
#else
#define octeon_toggle_bar1_swapmode(oct, idx)
#endif

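/* Copy a host buffer into BAR1-mapped device memory: byte writes until the
 * target is 8-byte aligned, 64-bit writes for the bulk of the transfer
 * (with swap mode toggled on big-endian hosts), then byte writes for any
 * tail shorter than 8 bytes.
 */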
static void
octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
		     u8 *hostbuf, u32 len)
{
	/* Head: byte copies until mapped_addr is 8-byte aligned. */
	while (len && ((unsigned long)mapped_addr & 7)) {
		writeb(*(hostbuf++), mapped_addr++);
		len--;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	/* Bulk: 64-bit copies while at least 8 bytes remain. */
	while (len >= 8) {
		writeq(*((u64 *)hostbuf), mapped_addr);
		mapped_addr += 8;
		hostbuf += 8;
		len -= 8;
	}

	/* Restore the original swap mode before the byte-wide tail. */
	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	/* Tail: any remaining bytes. */
	while (len--)
		writeb(*(hostbuf++), mapped_addr++);
}

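/* Mirror image of octeon_pci_fastwrite(): copy BAR1-mapped device memory
 * into a host buffer using byte reads for the unaligned head and tail and
 * 64-bit reads in between.
 */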
static void
octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr,
		    u8 *hostbuf, u32 len)
{
	/* Head: byte copies until mapped_addr is 8-byte aligned. */
	while (len && ((unsigned long)mapped_addr & 7)) {
		*(hostbuf++) = readb(mapped_addr++);
		len--;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	/* Bulk: 64-bit copies while at least 8 bytes remain. */
	while (len >= 8) {
		*((u64 *)hostbuf) = readq(mapped_addr);
		mapped_addr += 8;
		hostbuf += 8;
		len -= 8;
	}

	/* Restore the original swap mode before the byte-wide tail. */
	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	/* Tail: any remaining bytes. */
	while (len--)
		*(hostbuf++) = readb(mapped_addr++);
}

/* Core memory read/write with temporary BAR1 mappings.
 * op = 1 reads from the core, op = 0 writes to it.
 */
static void
__octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
			 u8 *hostbuf, u32 len, u32 op)
{
	u32 copy_len = 0, index_reg_val = 0;
	unsigned long flags;
	u8 __iomem *mapped_addr;
	u64 static_mapping_base;

	static_mapping_base = oct->console_nb_info.dram_region_base;

	/* Fast path: if the target lies in the statically mapped console
	 * region, use that BAR1 mapping directly; no locking or remapping
	 * is needed.
	 */
	if (static_mapping_base &&
	    static_mapping_base == (addr & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL))) {
		int bar1_index = oct->console_nb_info.bar1_index;

		mapped_addr = oct->mmio[1].hw_addr
			+ (bar1_index << ilog2(OCTEON_BAR1_ENTRY_SIZE))
			+ (addr & (OCTEON_BAR1_ENTRY_SIZE - 1ULL));

		if (op)
			octeon_pci_fastread(oct, mapped_addr, hostbuf, len);
		else
			octeon_pci_fastwrite(oct, mapped_addr, hostbuf, len);

		return;
	}

	spin_lock_irqsave(&oct->mem_access_lock, flags);

	/* Save the original index register value; it is restored once the
	 * transfer completes.
	 */
	index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX);
	do {
		/* Remap the dynamic window to the 4MB block holding addr. */
		oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1);
		mapped_addr = oct->mmio[1].hw_addr
		    + (MEMOPS_IDX << 22) + (addr & 0x3fffff);

		/* If the operation crosses a 4MB boundary, split the
		 * transfer at that boundary.
		 */
		if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) {
			copy_len = (u32)(((addr & ~(0x3fffff)) +
				   (MEMOPS_IDX << 22)) - addr);
		} else {
			copy_len = len;
		}

		if (op) {	/* read from core */
			octeon_pci_fastread(oct, mapped_addr, hostbuf,
					    copy_len);
		} else {
			octeon_pci_fastwrite(oct, mapped_addr, hostbuf,
					     copy_len);
		}

		len -= copy_len;
		addr += copy_len;
		hostbuf += copy_len;

	} while (len);

	oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val);

	spin_unlock_irqrestore(&oct->mem_access_lock, flags);
}

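/* Read len bytes of Octeon core memory at coreaddr into buf. */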
void
octeon_pci_read_core_mem(struct octeon_device *oct,
			 u64 coreaddr,
			 u8 *buf,
			 u32 len)
{
	__octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1);
}
EXPORT_SYMBOL_GPL(octeon_pci_read_core_mem);

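/* Write len bytes from buf to Octeon core memory at coreaddr. */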
void
octeon_pci_write_core_mem(struct octeon_device *oct,
			  u64 coreaddr,
			  const u8 *buf,
			  u32 len)
{
	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)buf, len, 0);
}
EXPORT_SYMBOL_GPL(octeon_pci_write_core_mem);
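
/* Example use (illustrative sketch only; assumes a valid struct
 * octeon_device *oct and a core DRAM address addr exposed by the device):
 *
 *	u8 buf[64];
 *
 *	octeon_pci_read_core_mem(oct, addr, buf, sizeof(buf));
 *	octeon_pci_write_core_mem(oct, addr, buf, sizeof(buf));
 */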
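
/* The Octeon core is big-endian, so the scalar helpers below stage values
 * in __be32/__be64 temporaries and convert to and from host byte order.
 */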
u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
{
	__be64 ret;

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1);

	return be64_to_cpu(ret);
}
EXPORT_SYMBOL_GPL(octeon_read_device_mem64);

u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
{
	__be32 ret;

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1);

	return be32_to_cpu(ret);
}
EXPORT_SYMBOL_GPL(octeon_read_device_mem32);

void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
			       u32 val)
{
	__be32 t = cpu_to_be32(val);

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);
}
EXPORT_SYMBOL_GPL(octeon_write_device_mem32);